Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig37
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atl1c/atl1c_hw.c15
-rw-r--r--drivers/net/atl1c/atl1c_hw.h43
-rw-r--r--drivers/net/atl1c/atl1c_main.c5
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c12
-rw-r--r--drivers/net/atl1e/atl1e_hw.c34
-rw-r--r--drivers/net/atl1e/atl1e_hw.h111
-rw-r--r--drivers/net/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/atlx/atl1.c77
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/ax88796.c810
-rw-r--r--drivers/net/benet/be.h16
-rw-r--r--drivers/net/benet/be_cmds.c145
-rw-r--r--drivers/net/benet/be_cmds.h61
-rw-r--r--drivers/net/benet/be_ethtool.c77
-rw-r--r--drivers/net/benet/be_hw.h47
-rw-r--r--drivers/net/benet/be_main.c239
-rw-r--r--drivers/net/bna/bnad.c108
-rw-r--r--drivers/net/bna/bnad.h2
-rw-r--r--drivers/net/bnx2.c8
-rw-r--r--drivers/net/bnx2.h4
-rw-r--r--drivers/net/bnx2x/bnx2x.h35
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c70
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h6
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c137
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h5
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c58
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h114
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c2527
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h34
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c598
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c259
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/bonding/bonding.h27
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/c_can/Kconfig15
-rw-r--r--drivers/net/can/c_can/Makefile8
-rw-r--r--drivers/net/can/c_can/c_can.c1158
-rw-r--r--drivers/net/can/c_can/c_can.h86
-rw-r--r--drivers/net/can/c_can/c_can_platform.c215
-rw-r--r--drivers/net/cnic.c191
-rw-r--r--drivers/net/cnic.h2
-rw-r--r--drivers/net/cnic_if.h8
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c5
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c1
-rw-r--r--drivers/net/dm9000.c7
-rw-r--r--drivers/net/e1000e/e1000.h5
-rw-r--r--drivers/net/e1000e/ethtool.c71
-rw-r--r--drivers/net/e1000e/ich8lan.c3
-rw-r--r--drivers/net/e1000e/lib.c4
-rw-r--r--drivers/net/e1000e/netdev.c123
-rw-r--r--drivers/net/e1000e/phy.c8
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/enic.h11
-rw-r--r--drivers/net/enic/enic_dev.c221
-rw-r--r--drivers/net/enic/enic_dev.h41
-rw-r--r--drivers/net/enic/enic_main.c326
-rw-r--r--drivers/net/enic/vnic_dev.c19
-rw-r--r--drivers/net/enic/vnic_dev.h8
-rw-r--r--drivers/net/enic/vnic_rq.h5
-rw-r--r--drivers/net/eql.c10
-rw-r--r--drivers/net/fec.c650
-rw-r--r--drivers/net/ftmac100.c1196
-rw-r--r--drivers/net/ftmac100.h180
-rw-r--r--drivers/net/hamradio/bpqether.c5
-rw-r--r--drivers/net/igb/e1000_82575.c11
-rw-r--r--drivers/net/igb/e1000_defines.h7
-rw-r--r--drivers/net/igb/e1000_hw.h1
-rw-r--r--drivers/net/igb/e1000_mbx.c38
-rw-r--r--drivers/net/igb/e1000_regs.h4
-rw-r--r--drivers/net/igb/igb.h2
-rw-r--r--drivers/net/igb/igb_ethtool.c13
-rw-r--r--drivers/net/igb/igb_main.c109
-rw-r--r--drivers/net/igbvf/ethtool.c6
-rw-r--r--drivers/net/igbvf/igbvf.h3
-rw-r--r--drivers/net/igbvf/netdev.c63
-rw-r--r--drivers/net/ipg.c4
-rw-r--r--drivers/net/ixgbe/ixgbe.h7
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c63
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c104
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c438
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c179
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c96
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h25
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c117
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h26
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c213
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c36
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c52
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c238
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c31
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h42
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c34
-rw-r--r--drivers/net/ixgbevf/defines.h2
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c46
-rw-r--r--drivers/net/jme.c306
-rw-r--r--drivers/net/jme.h87
-rw-r--r--drivers/net/loopback.c9
-rw-r--r--drivers/net/macvtap.c18
-rw-r--r--drivers/net/mii.c14
-rw-r--r--drivers/net/mv643xx_eth.c74
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/micrel.c24
-rw-r--r--drivers/net/ppp_generic.c148
-rw-r--r--drivers/net/pptp.c8
-rw-r--r--drivers/net/qla3xxx.c10
-rw-r--r--drivers/net/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c15
-rw-r--r--drivers/net/r6040.c115
-rw-r--r--drivers/net/sfc/efx.c82
-rw-r--r--drivers/net/sfc/efx.h19
-rw-r--r--drivers/net/sfc/ethtool.c37
-rw-r--r--drivers/net/sfc/falcon.c22
-rw-r--r--drivers/net/sfc/falcon_boards.c2
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sfc/filter.c117
-rw-r--r--drivers/net/sfc/io.h2
-rw-r--r--drivers/net/sfc/mcdi.c23
-rw-r--r--drivers/net/sfc/mcdi.h4
-rw-r--r--drivers/net/sfc/mcdi_mac.c2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h2
-rw-r--r--drivers/net/sfc/mcdi_phy.c2
-rw-r--r--drivers/net/sfc/mdio_10g.c34
-rw-r--r--drivers/net/sfc/mdio_10g.h5
-rw-r--r--drivers/net/sfc/mtd.c2
-rw-r--r--drivers/net/sfc/net_driver.h83
-rw-r--r--drivers/net/sfc/nic.c73
-rw-r--r--drivers/net/sfc/nic.h9
-rw-r--r--drivers/net/sfc/phy.h2
-rw-r--r--drivers/net/sfc/qt202x_phy.c2
-rw-r--r--drivers/net/sfc/regs.h8
-rw-r--r--drivers/net/sfc/rx.c144
-rw-r--r--drivers/net/sfc/selftest.c4
-rw-r--r--drivers/net/sfc/selftest.h2
-rw-r--r--drivers/net/sfc/siena.c24
-rw-r--r--drivers/net/sfc/spi.h2
-rw-r--r--drivers/net/sfc/tenxpress.c4
-rw-r--r--drivers/net/sfc/tx.c92
-rw-r--r--drivers/net/sfc/txc43128_phy.c4
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sh_eth.c208
-rw-r--r--drivers/net/sis900.c4
-rw-r--r--drivers/net/smc91x.c13
-rw-r--r--drivers/net/sungem.c58
-rw-r--r--drivers/net/sungem.h1
-rw-r--r--drivers/net/tg3.c161
-rw-r--r--drivers/net/tg3.h13
-rw-r--r--drivers/net/tlan.c3840
-rw-r--r--drivers/net/tlan.h192
-rw-r--r--drivers/net/tun.c85
-rw-r--r--drivers/net/typhoon.c3
-rw-r--r--drivers/net/veth.c12
-rw-r--r--drivers/net/via-velocity.c9
-rw-r--r--drivers/net/via-velocity.h8
-rw-r--r--drivers/net/vxge/vxge-config.c32
-rw-r--r--drivers/net/vxge/vxge-config.h10
-rw-r--r--drivers/net/vxge/vxge-main.c234
-rw-r--r--drivers/net/vxge/vxge-main.h23
-rw-r--r--drivers/net/vxge/vxge-traffic.c116
-rw-r--r--drivers/net/vxge/vxge-traffic.h14
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig4
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c3
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig11
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h38
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c114
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c48
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h28
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c26
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c46
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h15
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h107
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c112
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1141
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h115
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c82
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c440
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c45
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c755
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c171
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c717
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c308
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c15
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h28
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h25
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h20
-rw-r--r--drivers/net/wireless/ath/key.c5
-rw-r--r--drivers/net/wireless/ath/regd.c7
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig26
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c561
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c39
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c91
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c339
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c117
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c201
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c27
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c7
-rw-r--r--drivers/net/wireless/libertas/cfg.c6
-rw-r--r--drivers/net/wireless/libertas/cmd.c10
-rw-r--r--drivers/net/wireless/libertas/dev.h2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c368
-rw-r--r--drivers/net/wireless/libertas/main.c77
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/mwl8k.c456
-rw-r--r--drivers/net/wireless/p54/eeprom.c208
-rw-r--r--drivers/net/wireless/p54/eeprom.h7
-rw-r--r--drivers/net/wireless/p54/fwio.c12
-rw-r--r--drivers/net/wireless/p54/lmac.h1
-rw-r--r--drivers/net/wireless/p54/main.c47
-rw-r--r--drivers/net/wireless/p54/p54.h6
-rw-r--r--drivers/net/wireless/p54/p54spi_eeprom.h9
-rw-r--r--drivers/net/wireless/p54/txrx.c6
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c165
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c159
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h139
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c848
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c232
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c50
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c69
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c238
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c61
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig17
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile6
-rw-r--r--drivers/net/wireless/rtlwifi/base.c76
-rw-r--r--drivers/net/wireless/rtlwifi/base.h39
-rw-r--r--drivers/net/wireless/rtlwifi/core.c17
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c152
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h12
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c1388
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c2049
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/Makefile2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c1361
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/fw.c59
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c153
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2052
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h73
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c170
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h464
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/Makefile15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/def.h62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c116
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.h32
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/fw.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/fw.h30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2505
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h107
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.c142
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h180
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c611
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.h34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/reg.h30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c493
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.h30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c327
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.c1888
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/table.h71
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c684
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.h430
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c1035
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h164
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h668
-rw-r--r--drivers/net/wireless/wl1251/acx.c53
-rw-r--r--drivers/net/wireless/wl1251/acx.h72
-rw-r--r--drivers/net/wireless/wl1251/event.c18
-rw-r--r--drivers/net/wireless/wl1251/main.c18
-rw-r--r--drivers/net/wireless/wl1251/ps.c11
-rw-r--r--drivers/net/wireless/wl1251/rx.c49
-rw-r--r--drivers/net/wireless/wl1251/tx.c74
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h7
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c246
-rw-r--r--drivers/net/wireless/wl12xx/acx.h132
-rw-r--r--drivers/net/wireless/wl12xx/boot.c35
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c318
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h161
-rw-r--r--drivers/net/wireless/wl12xx/conf.h125
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c49
-rw-r--r--drivers/net/wireless/wl12xx/event.c21
-rw-r--r--drivers/net/wireless/wl12xx/event.h10
-rw-r--r--drivers/net/wireless/wl12xx/init.c400
-rw-r--r--drivers/net/wireless/wl12xx/init.h2
-rw-r--r--drivers/net/wireless/wl12xx/main.c1191
-rw-r--r--drivers/net/wireless/wl12xx/ps.c6
-rw-r--r--drivers/net/wireless/wl12xx/rx.c22
-rw-r--r--drivers/net/wireless/wl12xx/rx.h13
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c1
-rw-r--r--drivers/net/wireless/wl12xx/spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/tx.c125
-rw-r--r--drivers/net/wireless/wl12xx/tx.h10
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h152
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h11
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c169
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c448
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h24
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c597
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h37
405 files changed, 37828 insertions, 15355 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 03823327db2..fba89ae2926 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig"
 config AX88796
 	tristate "ASIX AX88796 NE2000 clone support"
 	depends on ARM || MIPS || SUPERH
-	select CRC32
-	select MII
+	select PHYLIB
+	select MDIO_BITBANG
 	help
 	  AX88796 driver, using platform bus to provide
 	  chip detection and resources
@@ -1944,7 +1944,8 @@ config 68360_ENET
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+		IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+	default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2007,6 +2008,15 @@ config BCM63XX_ENET
 	  This driver supports the ethernet MACs in the Broadcom 63xx
 	  MIPS chipset family (BCM63XX).
 
+config FTMAC100
+	tristate "Faraday FTMAC100 10/100 Ethernet support"
+	depends on ARM
+	select MII
+	help
+	  This driver supports the FTMAC100 10/100 Ethernet controller
+	  from Faraday. It is used on Faraday A320, Andes AG101 and some
+	  other ARM/NDS32 SoC's.
+
 source "drivers/net/fs_enet/Kconfig"
 
 source "drivers/net/octeon/Kconfig"
@@ -2594,14 +2604,9 @@ config CHELSIO_T1_1G
 	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
 	  are using only 10G cards say 'N' here.
 
-config CHELSIO_T3_DEPENDS
-	tristate
-	depends on PCI && INET
-	default y
-
 config CHELSIO_T3
 	tristate "Chelsio Communications T3 10Gb Ethernet support"
-	depends on CHELSIO_T3_DEPENDS
+	depends on PCI && INET
 	select FW_LOADER
 	select MDIO
 	help
@@ -2619,14 +2624,9 @@ config CHELSIO_T3
 	  To compile this driver as a module, choose M here: the module
 	  will be called cxgb3.
 
-config CHELSIO_T4_DEPENDS
-	tristate
-	depends on PCI && INET
-	default y
-
 config CHELSIO_T4
 	tristate "Chelsio Communications T4 Ethernet support"
-	depends on CHELSIO_T4_DEPENDS
+	depends on PCI
 	select FW_LOADER
 	select MDIO
 	help
@@ -2644,14 +2644,9 @@ config CHELSIO_T4
 	  To compile this driver as a module choose M here; the module
 	  will be called cxgb4.
 
-config CHELSIO_T4VF_DEPENDS
-	tristate
-	depends on PCI && INET
-	default y
-
 config CHELSIO_T4VF
 	tristate "Chelsio Communications T4 Virtual Function Ethernet support"
-	depends on CHELSIO_T4VF_DEPENDS
+	depends on PCI
 	help
 	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
 	  adapters with PCI-E SR-IOV Virtual Functions.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90738d1399..7c2171179f9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_FTMAC100) += ftmac100.o
 
 obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf67200994..23f2ab0f2fa 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
  */
 static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 {
-	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
 	u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
 				~GIGA_CR_1000T_SPEED_MASK;
 
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 	}
 
 	if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
-	    atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+	    atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
 		return -1;
 	return 0;
 }
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
 				"Error Setting up Auto-Negotiation\n");
 			return ret_val;
 		}
-		mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+		mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 		break;
 	case MEDIA_TYPE_100M_FULL:
-		mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+		mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
 		break;
 	case MEDIA_TYPE_100M_HALF:
-		mii_bmcr_data |= BMCR_SPEED_100;
+		mii_bmcr_data |= BMCR_SPEED100;
 		break;
 	case MEDIA_TYPE_10M_FULL:
-		mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+		mii_bmcr_data |= BMCR_FULLDPLX;
 		break;
 	case MEDIA_TYPE_10M_HALF:
-		mii_bmcr_data |= BMCR_SPEED_10;
 		break;
 	default:
 		if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
 	err = atl1c_phy_setup_adv(hw);
 	if (err)
 		return err;
-	mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+	mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 
 	return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
 }
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd675979aa..655fc6c4a8a 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
 #define REG_DEBUG_DATA0		0x1900
 #define REG_DEBUG_DATA1		0x1904
 
-/* PHY Control Register */
-#define MII_BMCR			0x00
-#define BMCR_SPEED_SELECT_MSB		0x0040	/* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE		0x0080	/* Collision test enable */
-#define BMCR_FULL_DUPLEX		0x0100	/* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG		0x0200	/* Restart auto negotiation */
-#define BMCR_ISOLATE			0x0400	/* Isolate PHY from MII */
-#define BMCR_POWER_DOWN			0x0800	/* Power down */
-#define BMCR_AUTO_NEG_EN		0x1000	/* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB		0x2000	/* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK			0x4000	/* 0 = normal, 1 = loopback */
-#define BMCR_RESET			0x8000	/* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK			0x2040
-#define BMCR_SPEED_1000			0x0040
-#define BMCR_SPEED_100			0x2000
-#define BMCR_SPEED_10			0x0000
-
-/* PHY Status Register */
-#define MII_BMSR			0x01
-#define BMMSR_EXTENDED_CAPS		0x0001	/* Extended register capabilities */
-#define BMSR_JABBER_DETECT		0x0002	/* Jabber Detected */
-#define BMSR_LINK_STATUS		0x0004	/* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS		0x0008	/* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT		0x0010	/* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE		0x0020	/* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS		0x0040	/* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS		0x0100	/* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS		0x0200	/* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS		0x0400	/* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS		0x0800	/* 10T Half Duplex Capable */
-#define BMSR_10T_FD_CAPS		0x1000	/* 10T Full Duplex Capable */
-#define BMSR_100X_HD_CAPS		0x2000	/* 100X Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS		0x4000	/* 100X Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS		0x8000	/* 100T4 Capable */
-
-#define MII_PHYSID1			0x02
-#define MII_PHYSID2			0x03
 #define L1D_MPW_PHYID1			0xD01C	/* V7 */
 #define L1D_MPW_PHYID2			0xD01D	/* V1-V6 */
 #define L1D_MPW_PHYID3			0xD01E	/* V8 */
 
 
 /* Autoneg Advertisement Register */
-#define MII_ADVERTISE			0x04
-#define ADVERTISE_SPEED_MASK		0x01E0
-#define ADVERTISE_DEFAULT_CAP		0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+	(ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
 
 /* 1000BASE-T Control Register */
-#define MII_GIGA_CR			0x09
 #define GIGA_CR_1000T_REPEATER_DTE	0x0400	/* 1=Repeater/switch device port 0=DTE device */
 
 #define GIGA_CR_1000T_MS_VALUE		0x0800	/* 1=Configure PHY as Master 0=Configure PHY as Slave */
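[Editorial note, not part of the patch] The atl1c (and, below, atl1e) hunks drop the drivers' private copies of the standard MII register and bit definitions and rely on the generic ones from <linux/mii.h> instead (MII_ADVERTISE, MII_CTRL1000, BMCR_ANENABLE, ADVERTISE_ALL, and so on). As a rough, hedged illustration of the idiom the patch switches to — advertise_all_speeds() is a hypothetical helper, atl1c_write_phy_reg() is the driver's existing PHY accessor shown above:

#include <linux/mii.h>

/* Hypothetical helper written purely against the shared <linux/mii.h> names. */
static int advertise_all_speeds(struct atl1c_hw *hw)
{
	u16 adv  = ADVERTISE_CSMA | ADVERTISE_ALL |
		   ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	u16 giga = ADVERTISE_1000FULL;	/* 1000BASE-T, full duplex only */

	/* program the advertisement and 1000BASE-T control registers */
	if (atl1c_write_phy_reg(hw, MII_ADVERTISE, adv) != 0 ||
	    atl1c_write_phy_reg(hw, MII_CTRL1000, giga) != 0)
		return -1;

	/* restart autonegotiation using the generic BMCR bit names */
	return atl1c_write_phy_reg(hw, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
}

Keeping a single set of definitions in <linux/mii.h> avoids the naming skew that had crept into the per-driver copies (for example the private BMCR_SPEED_100 versus the generic BMCR_SPEED100).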
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 3824382faec..7d9d5067a65 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1102,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
 	AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data);
 	max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
 			DEVICE_CTRL_MAX_PAYLOAD_MASK;
-	hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
 	max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
 			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
-	hw->dmar_block = min(max_pay_load, hw->dmar_block);
+	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 
 	txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) <<
 			TXQ_NUM_TPD_BURST_SHIFT;
@@ -2718,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
 		goto err_reset;
 	}
 
-	device_init_wakeup(&pdev->dev, 1);
 	/* reset the controller to
 	 * put the device in a known good starting state */
 	err = atl1c_phy_init(&adapter->hw);
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c3b94..1209297433b 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
 		ecmd->advertising = hw->autoneg_advertised |
 				    ADVERTISED_TP | ADVERTISED_Autoneg;
 
-		adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+		adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
 		adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
 		if (hw->autoneg_advertised & ADVERTISE_10_HALF)
-			adv4 |= MII_AR_10T_HD_CAPS;
+			adv4 |= ADVERTISE_10HALF;
 		if (hw->autoneg_advertised & ADVERTISE_10_FULL)
-			adv4 |= MII_AR_10T_FD_CAPS;
+			adv4 |= ADVERTISE_10FULL;
 		if (hw->autoneg_advertised & ADVERTISE_100_HALF)
-			adv4 |= MII_AR_100TX_HD_CAPS;
+			adv4 |= ADVERTISE_100HALF;
 		if (hw->autoneg_advertised & ADVERTISE_100_FULL)
-			adv4 |= MII_AR_100TX_FD_CAPS;
+			adv4 |= ADVERTISE_100FULL;
 		if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
-			adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+			adv9 |= ADVERTISE_1000FULL;
 
 		if (adv4 != hw->mii_autoneg_adv_reg ||
 		    adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043def8..923063d2e5b 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
 	 * the 1000Base-T control Register (Address 9).
 	 */
-	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+	mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
 	mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
 
 	/*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 	 */
 	switch (hw->media_type) {
 	case MEDIA_TYPE_AUTO_SENSOR:
-		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
-					MII_AR_10T_FD_CAPS |
-					MII_AR_100TX_HD_CAPS |
-					MII_AR_100TX_FD_CAPS);
-		hw->autoneg_advertised = ADVERTISE_10_HALF |
-					 ADVERTISE_10_FULL |
-					 ADVERTISE_100_HALF |
-					 ADVERTISE_100_FULL;
+		mii_autoneg_adv_reg |= ADVERTISE_ALL;
+		hw->autoneg_advertised = ADVERTISE_ALL;
 		if (hw->nic_type == athr_l1e) {
-			mii_1000t_ctrl_reg |=
-				MII_AT001_CR_1000T_FD_CAPS;
+			mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
 			hw->autoneg_advertised |= ADVERTISE_1000_FULL;
 		}
 		break;
 
 	case MEDIA_TYPE_100M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_100FULL;
 		hw->autoneg_advertised = ADVERTISE_100_FULL;
 		break;
 
 	case MEDIA_TYPE_100M_HALF:
-		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
 		hw->autoneg_advertised = ADVERTISE_100_HALF;
 		break;
 
 	case MEDIA_TYPE_10M_FULL:
-		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
 		hw->autoneg_advertised = ADVERTISE_10_FULL;
 		break;
 
 	default:
-		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+		mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
 		hw->autoneg_advertised = ADVERTISE_10_HALF;
 		break;
 	}
 
 	/* flow control fixed to enable all */
-	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+	mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
 
 	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
 	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
 		return ret_val;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
 					      mii_1000t_ctrl_reg);
 		if (ret_val)
 			return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
 	int ret_val;
 	u16 phy_data;
 
-	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+	phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
 
 	ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
 	if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
 		return err;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		err = atl1e_write_phy_reg(hw, MII_CTRL1000,
 					  hw->mii_1000t_ctrl_reg);
 		if (err)
 			return err;
 	}
 
 	err = atl1e_write_phy_reg(hw, MII_BMCR,
-				  MII_CR_RESET | MII_CR_AUTO_NEG_EN |
-				  MII_CR_RESTART_AUTO_NEG);
+				  BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
 	return err;
 }
 
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d86cf..74df16aef79 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 
 /***************************** MII definition ***************************************/
 /* PHY Common Register */
-#define MII_BMCR			0x00
-#define MII_BMSR			0x01
-#define MII_PHYSID1			0x02
-#define MII_PHYSID2			0x03
-#define MII_ADVERTISE			0x04
-#define MII_LPA				0x05
-#define MII_EXPANSION			0x06
-#define MII_AT001_CR			0x09
-#define MII_AT001_SR			0x0A
-#define MII_AT001_ESR			0x0F
 #define MII_AT001_PSCR			0x10
 #define MII_AT001_PSSR			0x11
 #define MII_INT_CTRL			0x12
 #define MII_INT_STATUS			0x13
 #define MII_SMARTSPEED			0x14
-#define MII_RERRCOUNTER			0x15
-#define MII_SREVISION			0x16
-#define MII_RESV1			0x17
 #define MII_LBRERROR			0x18
-#define MII_PHYADDR			0x19
 #define MII_RESV2			0x1a
-#define MII_TPISTATUS			0x1b
-#define MII_NCONFIG			0x1c
 
 #define MII_DBG_ADDR			0x1D
 #define MII_DBG_DATA			0x1E
 
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB		0x0040	/* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE		0x0080	/* Collision test enable */
-#define MII_CR_FULL_DUPLEX		0x0100	/* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG		0x0200	/* Restart auto negotiation */
-#define MII_CR_ISOLATE			0x0400	/* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN		0x0800	/* Power down */
-#define MII_CR_AUTO_NEG_EN		0x1000	/* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB		0x2000	/* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK			0x4000	/* 0 = normal, 1 = loopback */
-#define MII_CR_RESET			0x8000	/* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK		0x2040
-#define MII_CR_SPEED_1000		0x0040
-#define MII_CR_SPEED_100		0x2000
-#define MII_CR_SPEED_10			0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS		0x0001	/* Extended register capabilities */
-#define MII_SR_JABBER_DETECT		0x0002	/* Jabber Detected */
-#define MII_SR_LINK_STATUS		0x0004	/* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS		0x0008	/* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT		0x0010	/* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE		0x0020	/* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS	0x0040	/* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS		0x0100	/* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS		0x0200	/* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS		0x0400	/* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS		0x0800	/* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS		0x1000	/* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS		0x2000	/* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS		0x4000	/* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS		0x8000	/* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT			0x001f	/* Same as advertise selector */
-#define MII_LPA_10HALF			0x0020	/* Can do 10mbps half-duplex */
-#define MII_LPA_10FULL			0x0040	/* Can do 10mbps full-duplex */
-#define MII_LPA_100HALF			0x0080	/* Can do 100mbps half-duplex */
-#define MII_LPA_100FULL			0x0100	/* Can do 100mbps full-duplex */
-#define MII_LPA_100BASE4		0x0200	/* 100BASE-T4 */
-#define MII_LPA_PAUSE			0x0400	/* PAUSE */
-#define MII_LPA_ASYPAUSE		0x0800	/* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT			0x2000	/* Link partner faulted */
-#define MII_LPA_LPACK			0x4000	/* Link partner acked us */
-#define MII_LPA_NPAGE			0x8000	/* Next page bit */
-
 /* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD		0x0001	/* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS		0x0020	/* 10T Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS		0x0040	/* 10T Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS		0x0080	/* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS		0x0100	/* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS		0x0200	/* 100T4 Capable */
-#define MII_AR_PAUSE			0x0400	/* Pause operation desired */
-#define MII_AR_ASM_DIR			0x0800	/* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT		0x2000	/* Remote Fault detected */
-#define MII_AR_NEXT_PAGE		0x8000	/* Next Page ability supported */
-#define MII_AR_SPEED_MASK		0x01E0
-#define MII_AR_DEFAULT_CAP_MASK		0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK		0
 
 /* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS	0x0100	/* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS	0x0200	/* Advertise 1000T FD capability */
-#define MII_AT001_CR_1000T_REPEATER_DTE	0x0400	/* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE	0x0800	/* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE	0x1000	/* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL	0x0000	/* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1	0x2000	/* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2	0x4000	/* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3	0x6000	/* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4	0x8000	/* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK	0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK	0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS	0x0400	/* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS	0x0800	/* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS	0x1000	/* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS	0x2000	/* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES	0x4000	/* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT	0x8000	/* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT	12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT	13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS	0x1000	/* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS	0x2000	/* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS	0x4000	/* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS	0x8000	/* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+	(ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK	MII_AT001_CR_1000T_SPEED_MASK
 
 /* AT001 PHY Specific Control Register */
 #define MII_AT001_PSCR_JABBER_DISABLE	0x0001	/* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8baf394..1ff001a8270 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
 	hw->device_id = pdev->device;
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_id = pdev->subsystem_device;
+	hw->revision_id = pdev->revision;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
 	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
@@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
 	max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
 			DEVICE_CTRL_MAX_PAYLOAD_MASK;
 
-	hw->dmaw_block = min(max_pay_load, hw->dmaw_block);
+	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
 
 	max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
 			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
-	hw->dmar_block = min(max_pay_load, hw->dmar_block);
+	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
 
 	if (hw->nic_type != athr_l2e_revB)
 		AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
 		atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 		atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 
-		mii_advertise_data = MII_AR_10T_HD_CAPS;
+		mii_advertise_data = ADVERTISE_10HALF;
 
-		if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+		if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
 		    (atl1e_write_phy_reg(hw,
 			   MII_ADVERTISE, mii_advertise_data) != 0) ||
 		    (atl1e_phy_commit(hw)) != 0) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3b527687c28..67f40b9c16e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
 	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
 	adapter->wol = 0;
+	device_set_wakeup_enable(&adapter->pdev->dev, false);
 	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
 	adapter->ict = 50000;		/* 100ms */
 	adapter->link_speed = SPEED_0;	/* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
 }
 
 #ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1_adapter *adapter = netdev_priv(netdev);
 	struct atl1_hw *hw = &adapter->hw;
 	u32 ctrl = 0;
 	u32 wufc = adapter->wol;
 	u32 val;
-	int retval;
 	u16 speed;
 	u16 duplex;
 
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (netif_running(netdev))
 		atl1_down(adapter);
 
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	val = ctrl & BMSR_LSTATUS;
 	if (val)
 		wufc &= ~ATLX_WUFC_LNKC;
+	if (!wufc)
+		goto disable_wol;
 
-	if (val && wufc) {
+	if (val) {
 		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
 		if (val) {
 			if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
 		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
 		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
 		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto exit;
-	}
-
-	if (!val && wufc) {
+	} else {
 		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
 		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
 		ioread32(hw->hw_addr + REG_WOL_CTRL);
 		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
 		ioread32(hw->hw_addr + REG_MAC_CTRL);
 		hw->phy_configured = false;
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto exit;
 	}
 
-disable_wol:
+	return 0;
+
+ disable_wol:
 	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
 	ioread32(hw->hw_addr + REG_WOL_CTRL);
 	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
 	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
 	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
 	hw->phy_configured = false;
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
-	if (netif_running(netdev))
-		pci_disable_msi(adapter->pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
 
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1_adapter *adapter = netdev_priv(netdev);
-	u32 err;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
-	err = pci_enable_device(pdev);
-	if (err) {
-		if (netif_msg_ifup(adapter))
-			dev_printk(KERN_DEBUG, &pdev->dev,
-				"error enabling pci device\n");
-		return err;
-	}
-
-	pci_set_master(pdev);
 	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	atl1_reset_hw(&adapter->hw);
 
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS	(&atl1_pm_ops)
+
 #else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS	NULL
 #endif
 
 static void atl1_shutdown(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PM
-	atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+
+	atl1_suspend(&pdev->dev);
+	pci_wake_from_d3(pdev, adapter->wol);
+	pci_set_power_state(pdev, PCI_D3hot);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
 	.id_table = atl1_pci_tbl,
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
-	.suspend = atl1_suspend,
-	.resume = atl1_resume,
-	.shutdown = atl1_shutdown
+	.shutdown = atl1_shutdown,
+	.driver.pm = ATL1_PM_OPS,
 };
 
 /*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
 	adapter->wol = 0;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= ATLX_WUFC_MAG;
+
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
 	return 0;
 }
 
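[Editorial note, not part of the patch] The atl1 hunks above are an instance of the general conversion from the legacy pci_driver .suspend/.resume callbacks to dev_pm_ops: the PCI core then saves and restores config space and chooses the sleep state, so the driver callbacks only quiesce and re-initialise the hardware. A minimal, hedged sketch of that pattern — not the atl1 code itself; my_suspend, my_resume, my_pm_ops and my_driver are made-up names:

#include <linux/pci.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the device and arm wake-up if the user enabled it;
	 * config-space save and the D-state change are handled by the core */
	pci_wake_from_d3(pdev, device_may_wakeup(dev));
	return 0;
}

static int my_resume(struct device *dev)
{
	/* re-program the hardware; config space is already restored */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	.name		= "my_driver",
	/* .id_table, .probe and .remove omitted from this sketch */
	.driver.pm	= &my_pm_ops,
};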
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 4e6f4e95a5a..e637e9f28fd 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
 	hw->device_id = pdev->device;
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_id = pdev->subsystem_device;
+	hw->revision_id = pdev->revision;
 
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
 	adapter->wol = 0;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4bebff3faea..e7cb8c8b977 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -9,7 +9,7 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -17,46 +17,45 @@
17#include <linux/isapnp.h> 17#include <linux/isapnp.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/io.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/netdevice.h> 24#include <linux/netdevice.h>
24#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
25#include <linux/ethtool.h> 26#include <linux/ethtool.h>
26#include <linux/mii.h> 27#include <linux/mdio-bitbang.h>
28#include <linux/phy.h>
27#include <linux/eeprom_93cx6.h> 29#include <linux/eeprom_93cx6.h>
28#include <linux/slab.h> 30#include <linux/slab.h>
29 31
30#include <net/ax88796.h> 32#include <net/ax88796.h>
31 33
32#include <asm/system.h> 34#include <asm/system.h>
33#include <asm/io.h>
34
35static int phy_debug = 0;
36 35
37/* Rename the lib8390.c functions to show that they are in this driver */ 36/* Rename the lib8390.c functions to show that they are in this driver */
38#define __ei_open ax_ei_open 37#define __ei_open ax_ei_open
39#define __ei_close ax_ei_close 38#define __ei_close ax_ei_close
40#define __ei_poll ax_ei_poll 39#define __ei_poll ax_ei_poll
41#define __ei_start_xmit ax_ei_start_xmit 40#define __ei_start_xmit ax_ei_start_xmit
42#define __ei_tx_timeout ax_ei_tx_timeout 41#define __ei_tx_timeout ax_ei_tx_timeout
43#define __ei_get_stats ax_ei_get_stats 42#define __ei_get_stats ax_ei_get_stats
44#define __ei_set_multicast_list ax_ei_set_multicast_list 43#define __ei_set_multicast_list ax_ei_set_multicast_list
45#define __ei_interrupt ax_ei_interrupt 44#define __ei_interrupt ax_ei_interrupt
46#define ____alloc_ei_netdev ax__alloc_ei_netdev 45#define ____alloc_ei_netdev ax__alloc_ei_netdev
47#define __NS8390_init ax_NS8390_init 46#define __NS8390_init ax_NS8390_init
48 47
49/* force unsigned long back to 'void __iomem *' */ 48/* force unsigned long back to 'void __iomem *' */
50#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) 49#define ax_convert_addr(_a) ((void __force __iomem *)(_a))
51 50
52#define ei_inb(_a) readb(ax_convert_addr(_a)) 51#define ei_inb(_a) readb(ax_convert_addr(_a))
53#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) 52#define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
54 53
55#define ei_inb_p(_a) ei_inb(_a) 54#define ei_inb_p(_a) ei_inb(_a)
56#define ei_outb_p(_v, _a) ei_outb(_v, _a) 55#define ei_outb_p(_v, _a) ei_outb(_v, _a)
57 56
58/* define EI_SHIFT() to take into account our register offsets */ 57/* define EI_SHIFT() to take into account our register offsets */
59#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) 58#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
60 59
61/* Ensure we have our RCR base value */ 60/* Ensure we have our RCR base value */
62#define AX88796_PLATFORM 61#define AX88796_PLATFORM
@@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
74#define NE_DATAPORT EI_SHIFT(0x10) 73#define NE_DATAPORT EI_SHIFT(0x10)
75 74
76#define NE1SM_START_PG 0x20 /* First page of TX buffer */ 75#define NE1SM_START_PG 0x20 /* First page of TX buffer */
77#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ 76#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
78#define NESM_START_PG 0x40 /* First page of TX buffer */ 77#define NESM_START_PG 0x40 /* First page of TX buffer */
79#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ 78#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
80 79
80#define AX_GPOC_PPDSET BIT(6)
81
81/* device private data */ 82/* device private data */
82 83
83struct ax_device { 84struct ax_device {
84 struct timer_list mii_timer; 85 struct mii_bus *mii_bus;
85 spinlock_t mii_lock; 86 struct mdiobb_ctrl bb_ctrl;
86 struct mii_if_info mii; 87 struct phy_device *phy_dev;
87 88 void __iomem *addr_memr;
88 u32 msg_enable; 89 u8 reg_memr;
89 void __iomem *map2; 90 int link;
90 struct platform_device *dev; 91 int speed;
91 struct resource *mem; 92 int duplex;
92 struct resource *mem2; 93
93 struct ax_plat_data *plat; 94 void __iomem *map2;
94 95 const struct ax_plat_data *plat;
95 unsigned char running; 96
96 unsigned char resume_open; 97 unsigned char running;
97 unsigned int irqflags; 98 unsigned char resume_open;
98 99 unsigned int irqflags;
99 u32 reg_offsets[0x20]; 100
101 u32 reg_offsets[0x20];
100}; 102};
101 103
102static inline struct ax_device *to_ax_dev(struct net_device *dev) 104static inline struct ax_device *to_ax_dev(struct net_device *dev)
103{ 105{
104 struct ei_device *ei_local = netdev_priv(dev); 106 struct ei_device *ei_local = netdev_priv(dev);
105 return (struct ax_device *)(ei_local+1); 107 return (struct ax_device *)(ei_local + 1);
106} 108}
107 109
108/* ax_initial_check 110/*
111 * ax_initial_check
109 * 112 *
110 * do an initial probe for the card to check wether it exists 113 * do an initial probe for the card to check wether it exists
111 * and is functional 114 * and is functional
112 */ 115 */
113
114static int ax_initial_check(struct net_device *dev) 116static int ax_initial_check(struct net_device *dev)
115{ 117{
116 struct ei_device *ei_local = netdev_priv(dev); 118 struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev)
122 if (reg0 == 0xFF) 124 if (reg0 == 0xFF)
123 return -ENODEV; 125 return -ENODEV;
124 126
125 ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); 127 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
126 regd = ei_inb(ioaddr + 0x0d); 128 regd = ei_inb(ioaddr + 0x0d);
127 ei_outb(0xff, ioaddr + 0x0d); 129 ei_outb(0xff, ioaddr + 0x0d);
128 ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); 130 ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
129 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ 131 ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
130 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { 132 if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
131 ei_outb(reg0, ioaddr); 133 ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev)
136 return 0; 138 return 0;
137} 139}
138 140
139/* Hard reset the card. This used to pause for the same period that a 141/*
140 8390 reset command required, but that shouldn't be necessary. */ 142 * Hard reset the card. This used to pause for the same period that a
141 143 * 8390 reset command required, but that shouldn't be necessary.
144 */
142static void ax_reset_8390(struct net_device *dev) 145static void ax_reset_8390(struct net_device *dev)
143{ 146{
144 struct ei_device *ei_local = netdev_priv(dev); 147 struct ei_device *ei_local = netdev_priv(dev);
145 struct ax_device *ax = to_ax_dev(dev);
146 unsigned long reset_start_time = jiffies; 148 unsigned long reset_start_time = jiffies;
147 void __iomem *addr = (void __iomem *)dev->base_addr; 149 void __iomem *addr = (void __iomem *)dev->base_addr;
148 150
149 if (ei_debug > 1) 151 if (ei_debug > 1)
150 dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies); 152 netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
151 153
152 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); 154 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
153 155
154 ei_status.txing = 0; 156 ei_local->txing = 0;
155 ei_status.dmaing = 0; 157 ei_local->dmaing = 0;
156 158
157 /* This check _should_not_ be necessary, omit eventually. */ 159 /* This check _should_not_ be necessary, omit eventually. */
158 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 160 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
159 if (jiffies - reset_start_time > 2*HZ/100) { 161 if (jiffies - reset_start_time > 2 * HZ / 100) {
160 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 162 netdev_warn(dev, "%s: did not complete.\n", __func__);
161 __func__, dev->name);
162 break; 163 break;
163 } 164 }
164 } 165 }
@@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
171 int ring_page) 172 int ring_page)
172{ 173{
173 struct ei_device *ei_local = netdev_priv(dev); 174 struct ei_device *ei_local = netdev_priv(dev);
174 struct ax_device *ax = to_ax_dev(dev);
175 void __iomem *nic_base = ei_local->mem; 175 void __iomem *nic_base = ei_local->mem;
176 176
177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 177 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
178 if (ei_status.dmaing) { 178 if (ei_local->dmaing) {
179 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 179 netdev_err(dev, "DMAing conflict in %s "
180 "[DMAstat:%d][irqlock:%d].\n", 180 "[DMAstat:%d][irqlock:%d].\n",
181 dev->name, __func__, 181 __func__,
182 ei_status.dmaing, ei_status.irqlock); 182 ei_local->dmaing, ei_local->irqlock);
183 return; 183 return;
184 } 184 }
185 185
186 ei_status.dmaing |= 0x01; 186 ei_local->dmaing |= 0x01;
187 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 187 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); 188 ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
189 ei_outb(0, nic_base + EN0_RCNTHI); 189 ei_outb(0, nic_base + EN0_RCNTHI);
190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ 190 ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */
191 ei_outb(ring_page, nic_base + EN0_RSARHI); 191 ei_outb(ring_page, nic_base + EN0_RSARHI);
192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 192 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
193 193
194 if (ei_status.word16) 194 if (ei_local->word16)
195 readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); 195 readsw(nic_base + NE_DATAPORT, hdr,
196 sizeof(struct e8390_pkt_hdr) >> 1);
196 else 197 else
197 readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); 198 readsb(nic_base + NE_DATAPORT, hdr,
199 sizeof(struct e8390_pkt_hdr));
198 200
199 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 201 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
200 ei_status.dmaing &= ~0x01; 202 ei_local->dmaing &= ~0x01;
201 203
202 le16_to_cpus(&hdr->count); 204 le16_to_cpus(&hdr->count);
203} 205}
204 206
205 207
206/* Block input and output, similar to the Crynwr packet driver. If you 208/*
207 are porting to a new ethercard, look at the packet driver source for hints. 209 * Block input and output, similar to the Crynwr packet driver. If
208 The NEx000 doesn't share the on-board packet memory -- you have to put 210 * you are porting to a new ethercard, look at the packet driver
209 the packet out through the "remote DMA" dataport using ei_outb. */ 211 * source for hints. The NEx000 doesn't share the on-board packet
210 212 * memory -- you have to put the packet out through the "remote DMA"
213 * dataport using ei_outb.
214 */
211static void ax_block_input(struct net_device *dev, int count, 215static void ax_block_input(struct net_device *dev, int count,
212 struct sk_buff *skb, int ring_offset) 216 struct sk_buff *skb, int ring_offset)
213{ 217{
214 struct ei_device *ei_local = netdev_priv(dev); 218 struct ei_device *ei_local = netdev_priv(dev);
215 struct ax_device *ax = to_ax_dev(dev);
216 void __iomem *nic_base = ei_local->mem; 219 void __iomem *nic_base = ei_local->mem;
217 char *buf = skb->data; 220 char *buf = skb->data;
218 221
219 if (ei_status.dmaing) { 222 if (ei_local->dmaing) {
220 dev_err(&ax->dev->dev, 223 netdev_err(dev,
221 "%s: DMAing conflict in %s " 224 "DMAing conflict in %s "
222 "[DMAstat:%d][irqlock:%d].\n", 225 "[DMAstat:%d][irqlock:%d].\n",
223 dev->name, __func__, 226 __func__,
224 ei_status.dmaing, ei_status.irqlock); 227 ei_local->dmaing, ei_local->irqlock);
225 return; 228 return;
226 } 229 }
227 230
228 ei_status.dmaing |= 0x01; 231 ei_local->dmaing |= 0x01;
229 232
230 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); 233 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
231 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 234 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
232 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 235 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
233 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); 236 ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
234 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); 237 ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
235 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 238 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
236 239
237 if (ei_status.word16) { 240 if (ei_local->word16) {
238 readsw(nic_base + NE_DATAPORT, buf, count >> 1); 241 readsw(nic_base + NE_DATAPORT, buf, count >> 1);
239 if (count & 0x01) 242 if (count & 0x01)
240 buf[count-1] = ei_inb(nic_base + NE_DATAPORT); 243 buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count,
243 readsb(nic_base + NE_DATAPORT, buf, count); 246 readsb(nic_base + NE_DATAPORT, buf, count);
244 } 247 }
245 248
246 ei_status.dmaing &= ~1; 249 ei_local->dmaing &= ~1;
247} 250}
248 251
249static void ax_block_output(struct net_device *dev, int count, 252static void ax_block_output(struct net_device *dev, int count,
250 const unsigned char *buf, const int start_page) 253 const unsigned char *buf, const int start_page)
251{ 254{
252 struct ei_device *ei_local = netdev_priv(dev); 255 struct ei_device *ei_local = netdev_priv(dev);
253 struct ax_device *ax = to_ax_dev(dev);
254 void __iomem *nic_base = ei_local->mem; 256 void __iomem *nic_base = ei_local->mem;
255 unsigned long dma_start; 257 unsigned long dma_start;
256 258
257 /* Round the count up for word writes. Do we need to do this? 259 /*
258 What effect will an odd byte count have on the 8390? 260 * Round the count up for word writes. Do we need to do this?
259 I should check someday. */ 261 * What effect will an odd byte count have on the 8390? I
260 262 * should check someday.
261 if (ei_status.word16 && (count & 0x01)) 263 */
264 if (ei_local->word16 && (count & 0x01))
262 count++; 265 count++;
263 266
264 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 267 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
265 if (ei_status.dmaing) { 268 if (ei_local->dmaing) {
266 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 269 netdev_err(dev, "DMAing conflict in %s."
267 "[DMAstat:%d][irqlock:%d]\n", 270 "[DMAstat:%d][irqlock:%d]\n",
268 dev->name, __func__, 271 __func__,
269 ei_status.dmaing, ei_status.irqlock); 272 ei_local->dmaing, ei_local->irqlock);
270 return; 273 return;
271 } 274 }
272 275
273 ei_status.dmaing |= 0x01; 276 ei_local->dmaing |= 0x01;
274 /* We should already be in page 0, but to be safe... */ 277 /* We should already be in page 0, but to be safe... */
275 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); 278 ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
276 279
@@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count,
278 281
279 /* Now the normal output. */ 282 /* Now the normal output. */
280 ei_outb(count & 0xff, nic_base + EN0_RCNTLO); 283 ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
281 ei_outb(count >> 8, nic_base + EN0_RCNTHI); 284 ei_outb(count >> 8, nic_base + EN0_RCNTHI);
282 ei_outb(0x00, nic_base + EN0_RSARLO); 285 ei_outb(0x00, nic_base + EN0_RSARLO);
283 ei_outb(start_page, nic_base + EN0_RSARHI); 286 ei_outb(start_page, nic_base + EN0_RSARHI);
284 287
285 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); 288 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
286 if (ei_status.word16) { 289 if (ei_local->word16)
287 writesw(nic_base + NE_DATAPORT, buf, count>>1); 290 writesw(nic_base + NE_DATAPORT, buf, count >> 1);
288 } else { 291 else
289 writesb(nic_base + NE_DATAPORT, buf, count); 292 writesb(nic_base + NE_DATAPORT, buf, count);
290 }
291 293
292 dma_start = jiffies; 294 dma_start = jiffies;
293 295
294 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { 296 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
295 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 297 if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
296 dev_warn(&ax->dev->dev, 298 netdev_warn(dev, "timeout waiting for Tx RDC.\n");
297 "%s: timeout waiting for Tx RDC.\n", dev->name);
298 ax_reset_8390(dev); 299 ax_reset_8390(dev);
299 ax_NS8390_init(dev,1); 300 ax_NS8390_init(dev, 1);
300 break; 301 break;
301 } 302 }
302 } 303 }
303 304
304 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 305 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
305 ei_status.dmaing &= ~0x01; 306 ei_local->dmaing &= ~0x01;
306} 307}
307 308
308/* definitions for accessing MII/EEPROM interface */ 309/* definitions for accessing MII/EEPROM interface */
309 310
310#define AX_MEMR EI_SHIFT(0x14) 311#define AX_MEMR EI_SHIFT(0x14)
311#define AX_MEMR_MDC (1<<0) 312#define AX_MEMR_MDC BIT(0)
312#define AX_MEMR_MDIR (1<<1) 313#define AX_MEMR_MDIR BIT(1)
313#define AX_MEMR_MDI (1<<2) 314#define AX_MEMR_MDI BIT(2)
314#define AX_MEMR_MDO (1<<3) 315#define AX_MEMR_MDO BIT(3)
315#define AX_MEMR_EECS (1<<4) 316#define AX_MEMR_EECS BIT(4)
316#define AX_MEMR_EEI (1<<5) 317#define AX_MEMR_EEI BIT(5)
317#define AX_MEMR_EEO (1<<6) 318#define AX_MEMR_EEO BIT(6)
318#define AX_MEMR_EECLK (1<<7) 319#define AX_MEMR_EECLK BIT(7)
319 320
320/* ax_mii_ei_outbits 321static void ax_handle_link_change(struct net_device *dev)
321 *
322 * write the specified set of bits to the phy
323*/
324
325static void
326ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
327{ 322{
328 struct ei_device *ei_local = netdev_priv(dev); 323 struct ax_device *ax = to_ax_dev(dev);
329 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; 324 struct phy_device *phy_dev = ax->phy_dev;
330 unsigned int memr; 325 int status_change = 0;
331
332 /* clock low, data to output mode */
333 memr = ei_inb(memr_addr);
334 memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
335 ei_outb(memr, memr_addr);
336
337 for (len--; len >= 0; len--) {
338 if (bits & (1 << len))
339 memr |= AX_MEMR_MDO;
340 else
341 memr &= ~AX_MEMR_MDO;
342
343 ei_outb(memr, memr_addr);
344
345 /* clock high */
346 326
347 ei_outb(memr | AX_MEMR_MDC, memr_addr); 327 if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
348 udelay(1); 328 (ax->duplex != phy_dev->duplex))) {
349 329
350 /* clock low */ 330 ax->speed = phy_dev->speed;
351 ei_outb(memr, memr_addr); 331 ax->duplex = phy_dev->duplex;
332 status_change = 1;
352 } 333 }
353 334
354 /* leaves the clock line low, mdir input */ 335 if (phy_dev->link != ax->link) {
355 memr |= AX_MEMR_MDIR; 336 if (!phy_dev->link) {
356 ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR); 337 ax->speed = 0;
357} 338 ax->duplex = -1;
358 339 }
359/* ax_phy_ei_inbits 340 ax->link = phy_dev->link;
360 *
361 * read a specified number of bits from the phy
362*/
363
364static unsigned int
365ax_phy_ei_inbits(struct net_device *dev, int no)
366{
367 struct ei_device *ei_local = netdev_priv(dev);
368 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
369 unsigned int memr;
370 unsigned int result = 0;
371
372 /* clock low, data to input mode */
373 memr = ei_inb(memr_addr);
374 memr &= ~AX_MEMR_MDC;
375 memr |= AX_MEMR_MDIR;
376 ei_outb(memr, memr_addr);
377
378 for (no--; no >= 0; no--) {
379 ei_outb(memr | AX_MEMR_MDC, memr_addr);
380
381 udelay(1);
382
383 if (ei_inb(memr_addr) & AX_MEMR_MDI)
384 result |= (1<<no);
385 341
386 ei_outb(memr, memr_addr); 342 status_change = 1;
387 } 343 }
388 344
389 return result; 345 if (status_change)
390} 346 phy_print_status(phy_dev);
391
392/* ax_phy_issueaddr
393 *
394 * use the low level bit shifting routines to send the address
395 * and command to the specified phy
396*/
397
398static void
399ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
400{
401 if (phy_debug)
402 pr_debug("%s: dev %p, %04x, %04x, %d\n",
403 __func__, dev, phy_addr, reg, opc);
404
405 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */
406 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */
407 ax_mii_ei_outbits(dev, opc, 2); /* op code */
408 ax_mii_ei_outbits(dev, phy_addr, 5); /* phy address */
409 ax_mii_ei_outbits(dev, reg, 5); /* reg address */
410} 347}
411 348
412static int 349static int ax_mii_probe(struct net_device *dev)
413ax_phy_read(struct net_device *dev, int phy_addr, int reg)
414{ 350{
415 struct ei_device *ei_local = netdev_priv(dev); 351 struct ax_device *ax = to_ax_dev(dev);
416 unsigned long flags; 352 struct phy_device *phy_dev = NULL;
417 unsigned int result; 353 int ret;
418 354
419 spin_lock_irqsave(&ei_local->page_lock, flags); 355 /* find the first phy */
356 phy_dev = phy_find_first(ax->mii_bus);
357 if (!phy_dev) {
358 netdev_err(dev, "no PHY found\n");
359 return -ENODEV;
360 }
420 361
421 ax_phy_issueaddr(dev, phy_addr, reg, 2); 362 ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
363 PHY_INTERFACE_MODE_MII);
364 if (ret) {
365 netdev_err(dev, "Could not attach to PHY\n");
366 return ret;
367 }
422 368
423 result = ax_phy_ei_inbits(dev, 17); 369 /* mask with MAC supported features */
424 result &= ~(3<<16); 370 phy_dev->supported &= PHY_BASIC_FEATURES;
371 phy_dev->advertising = phy_dev->supported;
425 372
426 spin_unlock_irqrestore(&ei_local->page_lock, flags); 373 ax->phy_dev = phy_dev;
427 374
428 if (phy_debug) 375 netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
429 pr_debug("%s: %04x.%04x => read %04x\n", __func__, 376 phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
430 phy_addr, reg, result);
431 377
432 return result; 378 return 0;
433} 379}
434 380
435static void 381static void ax_phy_switch(struct net_device *dev, int on)
436ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
437{ 382{
438 struct ei_device *ei = netdev_priv(dev); 383 struct ei_device *ei_local = netdev_priv(dev);
439 struct ax_device *ax = to_ax_dev(dev); 384 struct ax_device *ax = to_ax_dev(dev);
440 unsigned long flags;
441
442 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
443 __func__, dev, phy_addr, reg, value);
444
445 spin_lock_irqsave(&ei->page_lock, flags);
446
447 ax_phy_issueaddr(dev, phy_addr, reg, 1);
448 ax_mii_ei_outbits(dev, 2, 2); /* send TA */
449 ax_mii_ei_outbits(dev, value, 16);
450
451 spin_unlock_irqrestore(&ei->page_lock, flags);
452}
453 385
454static void ax_mii_expiry(unsigned long data) 386 u8 reg_gpoc = ax->plat->gpoc_val;
455{
456 struct net_device *dev = (struct net_device *)data;
457 struct ax_device *ax = to_ax_dev(dev);
458 unsigned long flags;
459 387
460 spin_lock_irqsave(&ax->mii_lock, flags); 388 if (!!on)
461 mii_check_media(&ax->mii, netif_msg_link(ax), 0); 389 reg_gpoc &= ~AX_GPOC_PPDSET;
462 spin_unlock_irqrestore(&ax->mii_lock, flags); 390 else
391 reg_gpoc |= AX_GPOC_PPDSET;
463 392
464 if (ax->running) { 393 ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
465 ax->mii_timer.expires = jiffies + HZ*2;
466 add_timer(&ax->mii_timer);
467 }
468} 394}
469 395
470static int ax_open(struct net_device *dev) 396static int ax_open(struct net_device *dev)
471{ 397{
472 struct ax_device *ax = to_ax_dev(dev); 398 struct ax_device *ax = to_ax_dev(dev);
473 struct ei_device *ei_local = netdev_priv(dev);
474 int ret; 399 int ret;
475 400
476 dev_dbg(&ax->dev->dev, "%s: open\n", dev->name); 401 netdev_dbg(dev, "open\n");
477 402
478 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, 403 ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
479 dev->name, dev); 404 dev->name, dev);
480 if (ret) 405 if (ret)
481 return ret; 406 goto failed_request_irq;
482
483 ret = ax_ei_open(dev);
484 if (ret) {
485 free_irq(dev->irq, dev);
486 return ret;
487 }
488 407
489 /* turn the phy on (if turned off) */ 408 /* turn the phy on (if turned off) */
409 ax_phy_switch(dev, 1);
490 410
491 ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17)); 411 ret = ax_mii_probe(dev);
492 ax->running = 1; 412 if (ret)
493 413 goto failed_mii_probe;
494 /* start the MII timer */ 414 phy_start(ax->phy_dev);
495
496 init_timer(&ax->mii_timer);
497 415
498 ax->mii_timer.expires = jiffies+1; 416 ret = ax_ei_open(dev);
499 ax->mii_timer.data = (unsigned long) dev; 417 if (ret)
500 ax->mii_timer.function = ax_mii_expiry; 418 goto failed_ax_ei_open;
501 419
502 add_timer(&ax->mii_timer); 420 ax->running = 1;
503 421
504 return 0; 422 return 0;
423
424 failed_ax_ei_open:
425 phy_disconnect(ax->phy_dev);
426 failed_mii_probe:
427 ax_phy_switch(dev, 0);
428 free_irq(dev->irq, dev);
429 failed_request_irq:
430 return ret;
505} 431}
506 432
507static int ax_close(struct net_device *dev) 433static int ax_close(struct net_device *dev)
508{ 434{
509 struct ax_device *ax = to_ax_dev(dev); 435 struct ax_device *ax = to_ax_dev(dev);
510 struct ei_device *ei_local = netdev_priv(dev);
511 436
512 dev_dbg(&ax->dev->dev, "%s: close\n", dev->name); 437 netdev_dbg(dev, "close\n");
513
514 /* turn the phy off */
515
516 ei_outb(ax->plat->gpoc_val | (1<<6),
517 ei_local->mem + EI_SHIFT(0x17));
518 438
519 ax->running = 0; 439 ax->running = 0;
520 wmb(); 440 wmb();
521 441
522 del_timer_sync(&ax->mii_timer);
523 ax_ei_close(dev); 442 ax_ei_close(dev);
524 443
444 /* turn the phy off */
445 ax_phy_switch(dev, 0);
446 phy_disconnect(ax->phy_dev);
447
525 free_irq(dev->irq, dev); 448 free_irq(dev->irq, dev);
526 return 0; 449 return 0;
527} 450}
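The rewritten ax_open()/ax_close() above replace the private MII poll timer with phylib: open finds the first PHY on the newly registered bus, connects it with a link-change callback, starts the PHY state machine, and unwinds with goto labels in reverse order on failure. A condensed sketch of that open path; the foo_* names and struct foo_priv are illustrative, not the driver's own:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct foo_priv {
        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static void foo_adjust_link(struct net_device *dev)
{
        /* compare phy_dev->link/speed/duplex with cached copies and
         * call phy_print_status() when something changed */
}

static int foo_open(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);
        struct phy_device *phy_dev;
        int ret;

        ret = request_irq(dev->irq, foo_interrupt, 0, dev->name, dev);
        if (ret)
                return ret;

        phy_dev = phy_find_first(priv->mii_bus);
        if (!phy_dev) {
                ret = -ENODEV;
                goto err_irq;
        }

        ret = phy_connect_direct(dev, phy_dev, foo_adjust_link, 0,
                                 PHY_INTERFACE_MODE_MII);
        if (ret)
                goto err_irq;

        /* limit advertising to what the MAC can actually do */
        phy_dev->supported &= PHY_BASIC_FEATURES;
        phy_dev->advertising = phy_dev->supported;
        priv->phy_dev = phy_dev;

        phy_start(phy_dev);
        return 0;

err_irq:
        free_irq(dev->irq, dev);
        return ret;
}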
@@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev)
529static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 452static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
530{ 453{
531 struct ax_device *ax = to_ax_dev(dev); 454 struct ax_device *ax = to_ax_dev(dev);
532 unsigned long flags; 455 struct phy_device *phy_dev = ax->phy_dev;
533 int rc;
534 456
535 if (!netif_running(dev)) 457 if (!netif_running(dev))
536 return -EINVAL; 458 return -EINVAL;
537 459
538 spin_lock_irqsave(&ax->mii_lock, flags); 460 if (!phy_dev)
539 rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL); 461 return -ENODEV;
540 spin_unlock_irqrestore(&ax->mii_lock, flags);
541 462
542 return rc; 463 return phy_mii_ioctl(phy_dev, req, cmd);
543} 464}
544 465
545/* ethtool ops */ 466/* ethtool ops */
@@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
547static void ax_get_drvinfo(struct net_device *dev, 468static void ax_get_drvinfo(struct net_device *dev,
548 struct ethtool_drvinfo *info) 469 struct ethtool_drvinfo *info)
549{ 470{
550 struct ax_device *ax = to_ax_dev(dev); 471 struct platform_device *pdev = to_platform_device(dev->dev.parent);
551 472
552 strcpy(info->driver, DRV_NAME); 473 strcpy(info->driver, DRV_NAME);
553 strcpy(info->version, DRV_VERSION); 474 strcpy(info->version, DRV_VERSION);
554 strcpy(info->bus_info, ax->dev->name); 475 strcpy(info->bus_info, pdev->name);
555} 476}
556 477
557static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 478static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
558{ 479{
559 struct ax_device *ax = to_ax_dev(dev); 480 struct ax_device *ax = to_ax_dev(dev);
560 unsigned long flags; 481 struct phy_device *phy_dev = ax->phy_dev;
561 482
562 spin_lock_irqsave(&ax->mii_lock, flags); 483 if (!phy_dev)
563 mii_ethtool_gset(&ax->mii, cmd); 484 return -ENODEV;
564 spin_unlock_irqrestore(&ax->mii_lock, flags);
565 485
566 return 0; 486 return phy_ethtool_gset(phy_dev, cmd);
567} 487}
568 488
569static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 489static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570{ 490{
571 struct ax_device *ax = to_ax_dev(dev); 491 struct ax_device *ax = to_ax_dev(dev);
572 unsigned long flags; 492 struct phy_device *phy_dev = ax->phy_dev;
573 int rc;
574 493
575 spin_lock_irqsave(&ax->mii_lock, flags); 494 if (!phy_dev)
576 rc = mii_ethtool_sset(&ax->mii, cmd); 495 return -ENODEV;
577 spin_unlock_irqrestore(&ax->mii_lock, flags);
578
579 return rc;
580}
581
582static int ax_nway_reset(struct net_device *dev)
583{
584 struct ax_device *ax = to_ax_dev(dev);
585 return mii_nway_restart(&ax->mii);
586}
587 496
588static u32 ax_get_link(struct net_device *dev) 497 return phy_ethtool_sset(phy_dev, cmd);
589{
590 struct ax_device *ax = to_ax_dev(dev);
591 return mii_link_ok(&ax->mii);
592} 498}
593 499
594static const struct ethtool_ops ax_ethtool_ops = { 500static const struct ethtool_ops ax_ethtool_ops = {
595 .get_drvinfo = ax_get_drvinfo, 501 .get_drvinfo = ax_get_drvinfo,
596 .get_settings = ax_get_settings, 502 .get_settings = ax_get_settings,
597 .set_settings = ax_set_settings, 503 .set_settings = ax_set_settings,
598 .nway_reset = ax_nway_reset, 504 .get_link = ethtool_op_get_link,
599 .get_link = ax_get_link,
600}; 505};
601 506
602#ifdef CONFIG_AX88796_93CX6 507#ifdef CONFIG_AX88796_93CX6
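With phylib attached, the ioctl and ethtool handlers shown above shrink to thin wrappers around the generic helpers, and the hand-rolled nway_reset/get_link entries go away because ethtool_op_get_link and the PHY state machine cover them. A sketch of the wrapper shape, reusing the illustrative struct foo_priv and includes from the previous sketch:

static int foo_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct foo_priv *priv = netdev_priv(dev);

        if (!priv->phy_dev)
                return -ENODEV;

        return phy_ethtool_gset(priv->phy_dev, cmd);
}

static int foo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct foo_priv *priv = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;
        if (!priv->phy_dev)
                return -ENODEV;

        return phy_mii_ioctl(priv->phy_dev, req, cmd);
}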
@@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = {
640 .ndo_get_stats = ax_ei_get_stats, 545 .ndo_get_stats = ax_ei_get_stats,
641 .ndo_set_multicast_list = ax_ei_set_multicast_list, 546 .ndo_set_multicast_list = ax_ei_set_multicast_list,
642 .ndo_validate_addr = eth_validate_addr, 547 .ndo_validate_addr = eth_validate_addr,
643 .ndo_set_mac_address = eth_mac_addr, 548 .ndo_set_mac_address = eth_mac_addr,
644 .ndo_change_mtu = eth_change_mtu, 549 .ndo_change_mtu = eth_change_mtu,
645#ifdef CONFIG_NET_POLL_CONTROLLER 550#ifdef CONFIG_NET_POLL_CONTROLLER
646 .ndo_poll_controller = ax_ei_poll, 551 .ndo_poll_controller = ax_ei_poll,
647#endif 552#endif
648}; 553};
649 554
555static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
556{
557 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
558
559 if (level)
560 ax->reg_memr |= AX_MEMR_MDC;
561 else
562 ax->reg_memr &= ~AX_MEMR_MDC;
563
564 ei_outb(ax->reg_memr, ax->addr_memr);
565}
566
567static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
568{
569 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
570
571 if (output)
572 ax->reg_memr &= ~AX_MEMR_MDIR;
573 else
574 ax->reg_memr |= AX_MEMR_MDIR;
575
576 ei_outb(ax->reg_memr, ax->addr_memr);
577}
578
579static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
580{
581 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
582
583 if (value)
584 ax->reg_memr |= AX_MEMR_MDO;
585 else
586 ax->reg_memr &= ~AX_MEMR_MDO;
587
588 ei_outb(ax->reg_memr, ax->addr_memr);
589}
590
591static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
592{
593 struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
594 int reg_memr = ei_inb(ax->addr_memr);
595
596 return reg_memr & AX_MEMR_MDI ? 1 : 0;
597}
598
599static struct mdiobb_ops bb_ops = {
600 .owner = THIS_MODULE,
601 .set_mdc = ax_bb_mdc,
602 .set_mdio_dir = ax_bb_dir,
603 .set_mdio_data = ax_bb_set_data,
604 .get_mdio_data = ax_bb_get_data,
605};
606
650/* setup code */ 607/* setup code */
651 608
609static int ax_mii_init(struct net_device *dev)
610{
611 struct platform_device *pdev = to_platform_device(dev->dev.parent);
612 struct ei_device *ei_local = netdev_priv(dev);
613 struct ax_device *ax = to_ax_dev(dev);
614 int err, i;
615
616 ax->bb_ctrl.ops = &bb_ops;
617 ax->addr_memr = ei_local->mem + AX_MEMR;
618 ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
619 if (!ax->mii_bus) {
620 err = -ENOMEM;
621 goto out;
622 }
623
624 ax->mii_bus->name = "ax88796_mii_bus";
625 ax->mii_bus->parent = dev->dev.parent;
626 snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
627
628 ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
629 if (!ax->mii_bus->irq) {
630 err = -ENOMEM;
631 goto out_free_mdio_bitbang;
632 }
633
634 for (i = 0; i < PHY_MAX_ADDR; i++)
635 ax->mii_bus->irq[i] = PHY_POLL;
636
637 err = mdiobus_register(ax->mii_bus);
638 if (err)
639 goto out_free_irq;
640
641 return 0;
642
643 out_free_irq:
644 kfree(ax->mii_bus->irq);
645 out_free_mdio_bitbang:
646 free_mdio_bitbang(ax->mii_bus);
647 out:
648 return err;
649}
650
652static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) 651static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
653{ 652{
654 void __iomem *ioaddr = ei_local->mem; 653 void __iomem *ioaddr = ei_local->mem;
655 struct ax_device *ax = to_ax_dev(dev); 654 struct ax_device *ax = to_ax_dev(dev);
656 655
657 /* Select page 0*/ 656 /* Select page 0 */
658 ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD); 657 ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
659 658
660 /* set to byte access */ 659 /* set to byte access */
661 ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG); 660 ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
662 ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17)); 661 ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
663} 662}
664 663
665/* ax_init_dev 664/*
665 * ax_init_dev
666 * 666 *
667 * initialise the specified device, taking care to note the MAC 667 * initialise the specified device, taking care to note the MAC
668 * address it may already have (if configured), ensure 668 * address it may already have (if configured), ensure
669 * the device is ready to be used by lib8390.c and registerd with 669 * the device is ready to be used by lib8390.c and registerd with
670 * the network layer. 670 * the network layer.
671 */ 671 */
672 672static int ax_init_dev(struct net_device *dev)
673static int ax_init_dev(struct net_device *dev, int first_init)
674{ 673{
675 struct ei_device *ei_local = netdev_priv(dev); 674 struct ei_device *ei_local = netdev_priv(dev);
676 struct ax_device *ax = to_ax_dev(dev); 675 struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init)
690 689
691 /* read the mac from the card prom if we need it */ 690 /* read the mac from the card prom if we need it */
692 691
693 if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) { 692 if (ax->plat->flags & AXFLG_HAS_EEPROM) {
694 unsigned char SA_prom[32]; 693 unsigned char SA_prom[32];
695 694
696 for(i = 0; i < sizeof(SA_prom); i+=2) { 695 for (i = 0; i < sizeof(SA_prom); i += 2) {
697 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); 696 SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
698 SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT); 697 SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
699 } 698 }
700 699
701 if (ax->plat->wordlength == 2) 700 if (ax->plat->wordlength == 2)
702 for (i = 0; i < 16; i++) 701 for (i = 0; i < 16; i++)
703 SA_prom[i] = SA_prom[i+i]; 702 SA_prom[i] = SA_prom[i+i];
704 703
705 memcpy(dev->dev_addr, SA_prom, 6); 704 memcpy(dev->dev_addr, SA_prom, 6);
706 } 705 }
707 706
708#ifdef CONFIG_AX88796_93CX6 707#ifdef CONFIG_AX88796_93CX6
709 if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) { 708 if (ax->plat->flags & AXFLG_HAS_93CX6) {
710 unsigned char mac_addr[6]; 709 unsigned char mac_addr[6];
711 struct eeprom_93cx6 eeprom; 710 struct eeprom_93cx6 eeprom;
712 711
@@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
719 (__le16 __force *)mac_addr, 718 (__le16 __force *)mac_addr,
720 sizeof(mac_addr) >> 1); 719 sizeof(mac_addr) >> 1);
721 720
722 memcpy(dev->dev_addr, mac_addr, 6); 721 memcpy(dev->dev_addr, mac_addr, 6);
723 } 722 }
724#endif 723#endif
725 if (ax->plat->wordlength == 2) { 724 if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init)
732 stop_page = NE1SM_STOP_PG; 731 stop_page = NE1SM_STOP_PG;
733 } 732 }
734 733
735 /* load the mac-address from the device if this is the 734 /* load the mac-address from the device */
736 * first time we've initialised */ 735 if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
737 736 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
738 if (first_init) { 737 ei_local->mem + E8390_CMD); /* 0x61 */
739 if (ax->plat->flags & AXFLG_MAC_FROMDEV) { 738 for (i = 0; i < ETHER_ADDR_LEN; i++)
740 ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, 739 dev->dev_addr[i] =
741 ei_local->mem + E8390_CMD); /* 0x61 */ 740 ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
742 for (i = 0; i < ETHER_ADDR_LEN; i++)
743 dev->dev_addr[i] =
744 ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
745 }
746
747 if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
748 ax->plat->mac_addr)
749 memcpy(dev->dev_addr, ax->plat->mac_addr,
750 ETHER_ADDR_LEN);
751 } 741 }
752 742
743 if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
744 ax->plat->mac_addr)
745 memcpy(dev->dev_addr, ax->plat->mac_addr,
746 ETHER_ADDR_LEN);
747
753 ax_reset_8390(dev); 748 ax_reset_8390(dev);
754 749
755 ei_status.name = "AX88796"; 750 ei_local->name = "AX88796";
756 ei_status.tx_start_page = start_page; 751 ei_local->tx_start_page = start_page;
757 ei_status.stop_page = stop_page; 752 ei_local->stop_page = stop_page;
758 ei_status.word16 = (ax->plat->wordlength == 2); 753 ei_local->word16 = (ax->plat->wordlength == 2);
759 ei_status.rx_start_page = start_page + TX_PAGES; 754 ei_local->rx_start_page = start_page + TX_PAGES;
760 755
761#ifdef PACKETBUF_MEMSIZE 756#ifdef PACKETBUF_MEMSIZE
762 /* Allow the packet buffer size to be overridden by know-it-alls. */ 757 /* Allow the packet buffer size to be overridden by know-it-alls. */
763 ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; 758 ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
764#endif 759#endif
765 760
766 ei_status.reset_8390 = &ax_reset_8390; 761 ei_local->reset_8390 = &ax_reset_8390;
767 ei_status.block_input = &ax_block_input; 762 ei_local->block_input = &ax_block_input;
768 ei_status.block_output = &ax_block_output; 763 ei_local->block_output = &ax_block_output;
769 ei_status.get_8390_hdr = &ax_get_8390_hdr; 764 ei_local->get_8390_hdr = &ax_get_8390_hdr;
770 ei_status.priv = 0; 765 ei_local->priv = 0;
771
772 dev->netdev_ops = &ax_netdev_ops;
773 dev->ethtool_ops = &ax_ethtool_ops;
774
775 ax->msg_enable = NETIF_MSG_LINK;
776 ax->mii.phy_id_mask = 0x1f;
777 ax->mii.reg_num_mask = 0x1f;
778 ax->mii.phy_id = 0x10; /* onboard phy */
779 ax->mii.force_media = 0;
780 ax->mii.full_duplex = 0;
781 ax->mii.mdio_read = ax_phy_read;
782 ax->mii.mdio_write = ax_phy_write;
783 ax->mii.dev = dev;
784 766
785 ax_NS8390_init(dev, 0); 767 dev->netdev_ops = &ax_netdev_ops;
768 dev->ethtool_ops = &ax_ethtool_ops;
786 769
787 if (first_init) 770 ret = ax_mii_init(dev);
788 dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n", 771 if (ret)
789 ei_status.word16 ? 16:8, dev->irq, dev->base_addr, 772 goto out_irq;
790 dev->dev_addr); 773
774 ax_NS8390_init(dev, 0);
791 775
792 ret = register_netdev(dev); 776 ret = register_netdev(dev);
793 if (ret) 777 if (ret)
794 goto out_irq; 778 goto out_irq;
795 779
780 netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
781 ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
782 dev->dev_addr);
783
796 return 0; 784 return 0;
797 785
798 out_irq: 786 out_irq:
@@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init)
802 return ret; 790 return ret;
803} 791}
804 792
805static int ax_remove(struct platform_device *_dev) 793static int ax_remove(struct platform_device *pdev)
806{ 794{
807 struct net_device *dev = platform_get_drvdata(_dev); 795 struct net_device *dev = platform_get_drvdata(pdev);
808 struct ax_device *ax; 796 struct ei_device *ei_local = netdev_priv(dev);
809 797 struct ax_device *ax = to_ax_dev(dev);
810 ax = to_ax_dev(dev); 798 struct resource *mem;
811 799
812 unregister_netdev(dev); 800 unregister_netdev(dev);
813 free_irq(dev->irq, dev); 801 free_irq(dev->irq, dev);
814 802
815 iounmap(ei_status.mem); 803 iounmap(ei_local->mem);
816 release_resource(ax->mem); 804 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
817 kfree(ax->mem); 805 release_mem_region(mem->start, resource_size(mem));
818 806
819 if (ax->map2) { 807 if (ax->map2) {
820 iounmap(ax->map2); 808 iounmap(ax->map2);
821 release_resource(ax->mem2); 809 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
822 kfree(ax->mem2); 810 release_mem_region(mem->start, resource_size(mem));
823 } 811 }
824 812
825 free_netdev(dev); 813 free_netdev(dev);
@@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev)
827 return 0; 815 return 0;
828} 816}
829 817
830/* ax_probe 818/*
819 * ax_probe
831 * 820 *
832 * This is the entry point when the platform device system uses to 821 * This is the entry point when the platform device system uses to
833 * notify us of a new device to attach to. Allocate memory, find 822 * notify us of a new device to attach to. Allocate memory, find the
834 * the resources and information passed, and map the necessary registers. 823 * resources and information passed, and map the necessary registers.
835*/ 824 */
836
837static int ax_probe(struct platform_device *pdev) 825static int ax_probe(struct platform_device *pdev)
838{ 826{
839 struct net_device *dev; 827 struct net_device *dev;
840 struct ax_device *ax; 828 struct ei_device *ei_local;
841 struct resource *res; 829 struct ax_device *ax;
842 size_t size; 830 struct resource *irq, *mem, *mem2;
831 resource_size_t mem_size, mem2_size = 0;
843 int ret = 0; 832 int ret = 0;
844 833
845 dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); 834 dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev)
847 return -ENOMEM; 836 return -ENOMEM;
848 837
849 /* ok, let's setup our device */ 838 /* ok, let's setup our device */
839 SET_NETDEV_DEV(dev, &pdev->dev);
840 ei_local = netdev_priv(dev);
850 ax = to_ax_dev(dev); 841 ax = to_ax_dev(dev);
851 842
852 memset(ax, 0, sizeof(struct ax_device));
853
854 spin_lock_init(&ax->mii_lock);
855
856 ax->dev = pdev;
857 ax->plat = pdev->dev.platform_data; 843 ax->plat = pdev->dev.platform_data;
858 platform_set_drvdata(pdev, dev); 844 platform_set_drvdata(pdev, dev);
859 845
860 ei_status.rxcr_base = ax->plat->rcr_val; 846 ei_local->rxcr_base = ax->plat->rcr_val;
861 847
862 /* find the platform resources */ 848 /* find the platform resources */
863 849 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
864 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 850 if (!irq) {
865 if (res == NULL) {
866 dev_err(&pdev->dev, "no IRQ specified\n"); 851 dev_err(&pdev->dev, "no IRQ specified\n");
867 ret = -ENXIO; 852 ret = -ENXIO;
868 goto exit_mem; 853 goto exit_mem;
869 } 854 }
870 855
871 dev->irq = res->start; 856 dev->irq = irq->start;
872 ax->irqflags = res->flags & IRQF_TRIGGER_MASK; 857 ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
873 858
874 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 859 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
875 if (res == NULL) { 860 if (!mem) {
876 dev_err(&pdev->dev, "no MEM specified\n"); 861 dev_err(&pdev->dev, "no MEM specified\n");
877 ret = -ENXIO; 862 ret = -ENXIO;
878 goto exit_mem; 863 goto exit_mem;
879 } 864 }
880 865
881 size = (res->end - res->start) + 1; 866 mem_size = resource_size(mem);
882
883 /* setup the register offsets from either the platform data
884 * or by using the size of the resource provided */
885 867
868 /*
869 * setup the register offsets from either the platform data or
870 * by using the size of the resource provided
871 */
886 if (ax->plat->reg_offsets) 872 if (ax->plat->reg_offsets)
887 ei_status.reg_offset = ax->plat->reg_offsets; 873 ei_local->reg_offset = ax->plat->reg_offsets;
888 else { 874 else {
889 ei_status.reg_offset = ax->reg_offsets; 875 ei_local->reg_offset = ax->reg_offsets;
890 for (ret = 0; ret < 0x18; ret++) 876 for (ret = 0; ret < 0x18; ret++)
891 ax->reg_offsets[ret] = (size / 0x18) * ret; 877 ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
892 } 878 }
893 879
894 ax->mem = request_mem_region(res->start, size, pdev->name); 880 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
895 if (ax->mem == NULL) {
896 dev_err(&pdev->dev, "cannot reserve registers\n"); 881 dev_err(&pdev->dev, "cannot reserve registers\n");
897 ret = -ENXIO; 882 ret = -ENXIO;
898 goto exit_mem; 883 goto exit_mem;
899 } 884 }
900 885
901 ei_status.mem = ioremap(res->start, size); 886 ei_local->mem = ioremap(mem->start, mem_size);
902 dev->base_addr = (unsigned long)ei_status.mem; 887 dev->base_addr = (unsigned long)ei_local->mem;
903 888
904 if (ei_status.mem == NULL) { 889 if (ei_local->mem == NULL) {
905 dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n", 890 dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
906 (unsigned long long)res->start,
907 (unsigned long long)res->end);
908 891
909 ret = -ENXIO; 892 ret = -ENXIO;
910 goto exit_req; 893 goto exit_req;
911 } 894 }
912 895
913 /* look for reset area */ 896 /* look for reset area */
914 897 mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
915 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 898 if (!mem2) {
916 if (res == NULL) {
917 if (!ax->plat->reg_offsets) { 899 if (!ax->plat->reg_offsets) {
918 for (ret = 0; ret < 0x20; ret++) 900 for (ret = 0; ret < 0x20; ret++)
919 ax->reg_offsets[ret] = (size / 0x20) * ret; 901 ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
920 } 902 }
921
922 ax->map2 = NULL;
923 } else { 903 } else {
924 size = (res->end - res->start) + 1; 904 mem2_size = resource_size(mem2);
925 905
926 ax->mem2 = request_mem_region(res->start, size, pdev->name); 906 if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
927 if (ax->mem2 == NULL) {
928 dev_err(&pdev->dev, "cannot reserve registers\n"); 907 dev_err(&pdev->dev, "cannot reserve registers\n");
929 ret = -ENXIO; 908 ret = -ENXIO;
930 goto exit_mem1; 909 goto exit_mem1;
931 } 910 }
932 911
933 ax->map2 = ioremap(res->start, size); 912 ax->map2 = ioremap(mem2->start, mem2_size);
934 if (ax->map2 == NULL) { 913 if (!ax->map2) {
935 dev_err(&pdev->dev, "cannot map reset register\n"); 914 dev_err(&pdev->dev, "cannot map reset register\n");
936 ret = -ENXIO; 915 ret = -ENXIO;
937 goto exit_mem2; 916 goto exit_mem2;
938 } 917 }
939 918
940 ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem; 919 ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
941 } 920 }
942 921
943 /* got resources, now initialise and register device */ 922 /* got resources, now initialise and register device */
944 923 ret = ax_init_dev(dev);
945 ret = ax_init_dev(dev, 1);
946 if (!ret) 924 if (!ret)
947 return 0; 925 return 0;
948 926
949 if (ax->map2 == NULL) 927 if (!ax->map2)
950 goto exit_mem1; 928 goto exit_mem1;
951 929
952 iounmap(ax->map2); 930 iounmap(ax->map2);
953 931
954 exit_mem2: 932 exit_mem2:
955 release_resource(ax->mem2); 933 release_mem_region(mem2->start, mem2_size);
956 kfree(ax->mem2);
957 934
958 exit_mem1: 935 exit_mem1:
959 iounmap(ei_status.mem); 936 iounmap(ei_local->mem);
960 937
961 exit_req: 938 exit_req:
962 release_resource(ax->mem); 939 release_mem_region(mem->start, mem_size);
963 kfree(ax->mem);
964 940
965 exit_mem: 941 exit_mem:
966 free_netdev(dev); 942 free_netdev(dev);
@@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev)
974static int ax_suspend(struct platform_device *dev, pm_message_t state) 950static int ax_suspend(struct platform_device *dev, pm_message_t state)
975{ 951{
976 struct net_device *ndev = platform_get_drvdata(dev); 952 struct net_device *ndev = platform_get_drvdata(dev);
977 struct ax_device *ax = to_ax_dev(ndev); 953 struct ax_device *ax = to_ax_dev(ndev);
978 954
979 ax->resume_open = ax->running; 955 ax->resume_open = ax->running;
980 956
@@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state)
987static int ax_resume(struct platform_device *pdev) 963static int ax_resume(struct platform_device *pdev)
988{ 964{
989 struct net_device *ndev = platform_get_drvdata(pdev); 965 struct net_device *ndev = platform_get_drvdata(pdev);
990 struct ax_device *ax = to_ax_dev(ndev); 966 struct ax_device *ax = to_ax_dev(ndev);
991 967
992 ax_initial_setup(ndev, netdev_priv(ndev)); 968 ax_initial_setup(ndev, netdev_priv(ndev));
993 ax_NS8390_init(ndev, ax->resume_open); 969 ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev)
1001 977
1002#else 978#else
1003#define ax_suspend NULL 979#define ax_suspend NULL
1004#define ax_resume NULL 980#define ax_resume NULL
1005#endif 981#endif
1006 982
1007static struct platform_driver axdrv = { 983static struct platform_driver axdrv = {
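Once ax_mii_init() has registered the bit-banged bus, PHY registers are reached through the generic mdiobus layer rather than the removed ax_phy_read()/ax_phy_write() clocking code. As an illustration only (the address 0x10 mirrors the on-board PHY the old driver hard-coded; none of this is in the patch), restarting autonegotiation through such a bus could look like:

#include <linux/mii.h>
#include <linux/phy.h>

static int foo_restart_aneg(struct mii_bus *bus)
{
        int bmcr = mdiobus_read(bus, 0x10, MII_BMCR);

        if (bmcr < 0)
                return bmcr;

        return mdiobus_write(bus, 0x10, MII_BMCR,
                             bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
}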
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index add0b93350d..ed709a5d07d 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -225,6 +225,10 @@ struct be_rx_obj {
225 u32 cache_line_barrier[15]; 225 u32 cache_line_barrier[15];
226}; 226};
227 227
228struct be_drv_stats {
229 u8 be_on_die_temperature;
230};
231
228struct be_vf_cfg { 232struct be_vf_cfg {
229 unsigned char vf_mac_addr[ETH_ALEN]; 233 unsigned char vf_mac_addr[ETH_ALEN];
230 u32 vf_if_handle; 234 u32 vf_if_handle;
@@ -234,6 +238,7 @@ struct be_vf_cfg {
234}; 238};
235 239
236#define BE_INVALID_PMAC_ID 0xffffffff 240#define BE_INVALID_PMAC_ID 0xffffffff
241
237struct be_adapter { 242struct be_adapter {
238 struct pci_dev *pdev; 243 struct pci_dev *pdev;
239 struct net_device *netdev; 244 struct net_device *netdev;
@@ -269,6 +274,7 @@ struct be_adapter {
269 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 274 u32 big_page_size; /* Compounded page size shared by rx wrbs */
270 275
271 u8 msix_vec_next_idx; 276 u8 msix_vec_next_idx;
277 struct be_drv_stats drv_stats;
272 278
273 struct vlan_group *vlan_grp; 279 struct vlan_group *vlan_grp;
274 u16 vlans_added; 280 u16 vlans_added;
@@ -281,6 +287,7 @@ struct be_adapter {
281 struct be_dma_mem stats_cmd; 287 struct be_dma_mem stats_cmd;
282 /* Work queue used to perform periodic tasks like getting statistics */ 288 /* Work queue used to perform periodic tasks like getting statistics */
283 struct delayed_work work; 289 struct delayed_work work;
290 u16 work_counter;
284 291
285 /* Ethtool knobs and info */ 292 /* Ethtool knobs and info */
286 bool rx_csum; /* BE card must perform rx-checksumming */ 293 bool rx_csum; /* BE card must perform rx-checksumming */
@@ -298,7 +305,7 @@ struct be_adapter {
298 u32 rx_fc; /* Rx flow control */ 305 u32 rx_fc; /* Rx flow control */
299 u32 tx_fc; /* Tx flow control */ 306 u32 tx_fc; /* Tx flow control */
300 bool ue_detected; 307 bool ue_detected;
301 bool stats_ioctl_sent; 308 bool stats_cmd_sent;
302 int link_speed; 309 int link_speed;
303 u8 port_type; 310 u8 port_type;
304 u8 transceiver; 311 u8 transceiver;
@@ -311,6 +318,8 @@ struct be_adapter {
311 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 318 struct be_vf_cfg vf_cfg[BE_MAX_VF];
312 u8 is_virtfn; 319 u8 is_virtfn;
313 u32 sli_family; 320 u32 sli_family;
321 u8 hba_port_num;
322 u16 pvid;
314}; 323};
315 324
316#define be_physfn(adapter) (!adapter->is_virtfn) 325#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +459,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
450 mac[5] = (u8)(addr & 0xFF); 459 mac[5] = (u8)(addr & 0xFF);
451 mac[4] = (u8)((addr >> 8) & 0xFF); 460 mac[4] = (u8)((addr >> 8) & 0xFF);
452 mac[3] = (u8)((addr >> 16) & 0xFF); 461 mac[3] = (u8)((addr >> 16) & 0xFF);
453 mac[2] = 0xC9; 462 /* Use the OUI from the current MAC address */
454 mac[1] = 0x00; 463 memcpy(mac, adapter->netdev->dev_addr, 3);
455 mac[0] = 0x00;
456} 464}
457 465
458extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 466extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
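be_vf_eth_addr_generate() now builds VF MAC addresses by copying the three-byte OUI from the interface's current MAC instead of hard-coding 00:00:C9, and only the low 24 bits come from the generated value. A small sketch of that layout (foo_* names are illustrative):

#include <linux/string.h>
#include <linux/types.h>

/* keep the adapter's OUI, overwrite only the NIC-specific half */
static void foo_vf_mac(const u8 *pf_mac, u32 addr, u8 *vf_mac)
{
        memcpy(vf_mac, pf_mac, 3);
        vf_mac[3] = (addr >> 16) & 0xFF;
        vf_mac[4] = (addr >> 8) & 0xFF;
        vf_mac[5] = addr & 0xFF;
}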
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index a179cc6d79f..1822ecdadc7 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -18,11 +18,20 @@
18#include "be.h" 18#include "be.h"
19#include "be_cmds.h" 19#include "be_cmds.h"
20 20
21/* Must be a power of 2 or else MODULO will BUG_ON */
22static int be_get_temp_freq = 32;
23
21static void be_mcc_notify(struct be_adapter *adapter) 24static void be_mcc_notify(struct be_adapter *adapter)
22{ 25{
23 struct be_queue_info *mccq = &adapter->mcc_obj.q; 26 struct be_queue_info *mccq = &adapter->mcc_obj.q;
24 u32 val = 0; 27 u32 val = 0;
25 28
29 if (adapter->eeh_err) {
30 dev_info(&adapter->pdev->dev,
31 "Error in Card Detected! Cannot issue commands\n");
32 return;
33 }
34
26 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 35 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 36 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28 37
@@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
75 be_dws_le_to_cpu(&resp->hw_stats, 84 be_dws_le_to_cpu(&resp->hw_stats,
76 sizeof(resp->hw_stats)); 85 sizeof(resp->hw_stats));
77 netdev_stats_update(adapter); 86 netdev_stats_update(adapter);
78 adapter->stats_ioctl_sent = false; 87 adapter->stats_cmd_sent = false;
79 } 88 }
80 } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) && 89 } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
81 (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) { 90 (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
102{ 111{
103 if (evt->valid) { 112 if (evt->valid) {
104 adapter->vlan_prio_bmap = evt->available_priority_bmap; 113 adapter->vlan_prio_bmap = evt->available_priority_bmap;
114 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
105 adapter->recommended_prio = 115 adapter->recommended_prio =
106 evt->reco_default_priority << VLAN_PRIO_SHIFT; 116 evt->reco_default_priority << VLAN_PRIO_SHIFT;
107 } 117 }
@@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
117 } 127 }
118} 128}
119 129
130/*Grp5 PVID evt*/
131static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
132 struct be_async_event_grp5_pvid_state *evt)
133{
134 if (evt->enabled)
135 adapter->pvid = evt->tag;
136 else
137 adapter->pvid = 0;
138}
139
120static void be_async_grp5_evt_process(struct be_adapter *adapter, 140static void be_async_grp5_evt_process(struct be_adapter *adapter,
121 u32 trailer, struct be_mcc_compl *evt) 141 u32 trailer, struct be_mcc_compl *evt)
122{ 142{
@@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
134 be_async_grp5_qos_speed_process(adapter, 154 be_async_grp5_qos_speed_process(adapter,
135 (struct be_async_event_grp5_qos_link_speed *)evt); 155 (struct be_async_event_grp5_qos_link_speed *)evt);
136 break; 156 break;
157 case ASYNC_EVENT_PVID_STATE:
158 be_async_grp5_pvid_state_process(adapter,
159 (struct be_async_event_grp5_pvid_state *)evt);
160 break;
137 default: 161 default:
138 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); 162 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
139 break; 163 break;
@@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
216 int i, num, status = 0; 240 int i, num, status = 0;
217 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 241 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
218 242
243 if (adapter->eeh_err)
244 return -EIO;
245
219 for (i = 0; i < mcc_timeout; i++) { 246 for (i = 0; i < mcc_timeout; i++) {
220 num = be_process_mcc(adapter, &status); 247 num = be_process_mcc(adapter, &status);
221 if (num) 248 if (num)
@@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
245 int msecs = 0; 272 int msecs = 0;
246 u32 ready; 273 u32 ready;
247 274
275 if (adapter->eeh_err) {
276 dev_err(&adapter->pdev->dev,
277 "Error detected in card.Cannot issue commands\n");
278 return -EIO;
279 }
280
248 do { 281 do {
249 ready = ioread32(db); 282 ready = ioread32(db);
250 if (ready == 0xffffffff) { 283 if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
598 631
599/* Uses synchronous MCCQ */ 632/* Uses synchronous MCCQ */
600int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 633int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
601 u32 if_id, u32 *pmac_id) 634 u32 if_id, u32 *pmac_id, u32 domain)
602{ 635{
603 struct be_mcc_wrb *wrb; 636 struct be_mcc_wrb *wrb;
604 struct be_cmd_req_pmac_add *req; 637 struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
619 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 652 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
620 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); 653 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
621 654
655 req->hdr.domain = domain;
622 req->if_id = cpu_to_le32(if_id); 656 req->if_id = cpu_to_le32(if_id);
623 memcpy(req->mac_address, mac_addr, ETH_ALEN); 657 memcpy(req->mac_address, mac_addr, ETH_ALEN);
624 658
@@ -634,7 +668,7 @@ err:
634} 668}
635 669
636/* Uses synchronous MCCQ */ 670/* Uses synchronous MCCQ */
637int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) 671int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
638{ 672{
639 struct be_mcc_wrb *wrb; 673 struct be_mcc_wrb *wrb;
640 struct be_cmd_req_pmac_del *req; 674 struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
655 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 689 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
656 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); 690 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
657 691
692 req->hdr.domain = dom;
658 req->if_id = cpu_to_le32(if_id); 693 req->if_id = cpu_to_le32(if_id);
659 req->pmac_id = cpu_to_le32(pmac_id); 694 req->pmac_id = cpu_to_le32(pmac_id);
660 695
@@ -995,7 +1030,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
995} 1030}
996 1031
997/* Uses mbox */ 1032/* Uses mbox */
998int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) 1033int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
999{ 1034{
1000 struct be_mcc_wrb *wrb; 1035 struct be_mcc_wrb *wrb;
1001 struct be_cmd_req_if_destroy *req; 1036 struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1051,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
1016 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1051 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1017 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); 1052 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1018 1053
1054 req->hdr.domain = domain;
1019 req->interface_id = cpu_to_le32(interface_id); 1055 req->interface_id = cpu_to_le32(interface_id);
1020 1056
1021 status = be_mbox_notify_wait(adapter); 1057 status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1072,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1036 struct be_sge *sge; 1072 struct be_sge *sge;
1037 int status = 0; 1073 int status = 0;
1038 1074
1075 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1076 be_cmd_get_die_temperature(adapter);
1077
1039 spin_lock_bh(&adapter->mcc_lock); 1078 spin_lock_bh(&adapter->mcc_lock);
1040 1079
1041 wrb = wrb_from_mccq(adapter); 1080 wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1095,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1056 sge->len = cpu_to_le32(nonemb_cmd->size); 1095 sge->len = cpu_to_le32(nonemb_cmd->size);
1057 1096
1058 be_mcc_notify(adapter); 1097 be_mcc_notify(adapter);
1059 adapter->stats_ioctl_sent = true; 1098 adapter->stats_cmd_sent = true;
1060 1099
1061err: 1100err:
1062 spin_unlock_bh(&adapter->mcc_lock); 1101 spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1142,44 @@ err:
1103 return status; 1142 return status;
1104} 1143}
1105 1144
1145/* Uses synchronous mcc */
1146int be_cmd_get_die_temperature(struct be_adapter *adapter)
1147{
1148 struct be_mcc_wrb *wrb;
1149 struct be_cmd_req_get_cntl_addnl_attribs *req;
1150 int status;
1151
1152 spin_lock_bh(&adapter->mcc_lock);
1153
1154 wrb = wrb_from_mccq(adapter);
1155 if (!wrb) {
1156 status = -EBUSY;
1157 goto err;
1158 }
1159 req = embedded_payload(wrb);
1160
1161 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1162 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
1163
1164 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1165 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
1166
1167 status = be_mcc_notify_wait(adapter);
1168 if (!status) {
1169 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
1170 embedded_payload(wrb);
1171 adapter->drv_stats.be_on_die_temperature =
1172 resp->on_die_temperature;
1173 }
1174 /* If IOCTL fails once, do not bother issuing it again */
1175 else
1176 be_get_temp_freq = 0;
1177
1178err:
1179 spin_unlock_bh(&adapter->mcc_lock);
1180 return status;
1181}
1182
1106/* Uses Mbox */ 1183/* Uses Mbox */
1107int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) 1184int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1108{ 1185{
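
be_cmd_get_die_temperature() above follows the driver's synchronous embedded-payload MCC pattern: take mcc_lock, obtain a WRB, prepare the WRB and request headers for OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, wait for the completion, and read on_die_temperature out of the embedded response; a failure zeroes be_get_temp_freq so the query is never retried. Its caller is the earlier be_cmd_get_stats() hunk, which piggybacks the query on every be_get_temp_freq-th run of the periodic worker. A self-contained sketch of that rate-limiting idea (plain C, made-up names; the sketch also checks for a zero frequency before taking the modulo):

#include <stdio.h>

#define MODULO(x, y)  ((x) % (y))

static unsigned int get_temp_freq = 64;  /* how often to piggyback the query */

static void maybe_query_temperature(unsigned int work_counter)
{
        /* issue the expensive query only on every get_temp_freq-th tick */
        if (get_temp_freq && MODULO(work_counter, get_temp_freq) == 0)
                printf("tick %u: query die temperature\n", work_counter);
}

int main(void)
{
        unsigned int tick;

        for (tick = 0; tick < 256; tick++)
                maybe_query_temperature(tick);
        return 0;
}
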
@@ -1868,8 +1945,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1868 OPCODE_COMMON_SET_QOS, sizeof(*req)); 1945 OPCODE_COMMON_SET_QOS, sizeof(*req));
1869 1946
1870 req->hdr.domain = domain; 1947 req->hdr.domain = domain;
1871 req->valid_bits = BE_QOS_BITS_NIC; 1948 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
1872 req->max_bps_nic = bps; 1949 req->max_bps_nic = cpu_to_le32(bps);
1873 1950
1874 status = be_mcc_notify_wait(adapter); 1951 status = be_mcc_notify_wait(adapter);
1875 1952
@@ -1877,3 +1954,57 @@ err:
1877 spin_unlock_bh(&adapter->mcc_lock); 1954 spin_unlock_bh(&adapter->mcc_lock);
1878 return status; 1955 return status;
1879} 1956}
1957
1958int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
1959{
1960 struct be_mcc_wrb *wrb;
1961 struct be_cmd_req_cntl_attribs *req;
1962 struct be_cmd_resp_cntl_attribs *resp;
1963 struct be_sge *sge;
1964 int status;
1965 int payload_len = max(sizeof(*req), sizeof(*resp));
1966 struct mgmt_controller_attrib *attribs;
1967 struct be_dma_mem attribs_cmd;
1968
1969 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
1970 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
1971 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
1972 &attribs_cmd.dma);
1973 if (!attribs_cmd.va) {
1974 dev_err(&adapter->pdev->dev,
1975 "Memory allocation failure\n");
1976 return -ENOMEM;
1977 }
1978
1979 if (mutex_lock_interruptible(&adapter->mbox_lock))
1980 return -1;
1981
1982 wrb = wrb_from_mbox(adapter);
1983 if (!wrb) {
1984 status = -EBUSY;
1985 goto err;
1986 }
1987 req = attribs_cmd.va;
1988 sge = nonembedded_sgl(wrb);
1989
1990 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1991 OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
1992 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1993 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
1994 sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
1995 sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
1996 sge->len = cpu_to_le32(attribs_cmd.size);
1997
1998 status = be_mbox_notify_wait(adapter);
1999 if (!status) {
2000 attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
2001 sizeof(struct be_cmd_resp_hdr));
2002 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2003 }
2004
2005err:
2006 mutex_unlock(&adapter->mbox_lock);
2007 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2008 attribs_cmd.dma);
2009 return status;
2010}
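
be_cmd_get_cntl_attributes() uses the non-embedded command form: the request and response live in a separately allocated DMA-coherent buffer whose address and length go into the WRB's scatter-gather element, and the response is parsed from attribs_cmd.va just past the response header. Note that this new be_cmds.c code still allocates with pci_alloc_consistent(), while the be_ethtool.c and be_main.c hunks below convert the same pattern to dma_alloc_coherent(). A hedged sketch of filling such an SGE from a dma_addr_t, assuming the driver's struct be_sge from be_cmds.h (upper_32_bits()/lower_32_bits() are standard kernel helpers):

#include <linux/kernel.h>
#include "be_cmds.h"            /* driver header, for struct be_sge */

/* Sketch: describe a DMA buffer to the firmware via a scatter-gather entry. */
static void fill_sge(struct be_sge *sge, dma_addr_t dma, u32 len)
{
        sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
        sge->pa_lo = cpu_to_le32(lower_32_bits(dma));  /* same as dma & 0xFFFFFFFF */
        sge->len   = cpu_to_le32(len);
}
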
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 83d15c8a9fa..93e5768fc70 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -88,6 +88,7 @@ struct be_mcc_compl {
88#define ASYNC_EVENT_CODE_GRP_5 0x5 88#define ASYNC_EVENT_CODE_GRP_5 0x5
89#define ASYNC_EVENT_QOS_SPEED 0x1 89#define ASYNC_EVENT_QOS_SPEED 0x1
90#define ASYNC_EVENT_COS_PRIORITY 0x2 90#define ASYNC_EVENT_COS_PRIORITY 0x2
91#define ASYNC_EVENT_PVID_STATE 0x3
91struct be_async_event_trailer { 92struct be_async_event_trailer {
92 u32 code; 93 u32 code;
93}; 94};
@@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority {
134 struct be_async_event_trailer trailer; 135 struct be_async_event_trailer trailer;
135} __packed; 136} __packed;
136 137
138/* When the event code of an async trailer is GRP5 and event type is
139 * PVID state, the mcc_compl must be interpreted as follows
140 */
141struct be_async_event_grp5_pvid_state {
142 u8 enabled;
143 u8 rsvd0;
144 u16 tag;
145 u32 event_tag;
146 u32 rsvd1;
147 struct be_async_event_trailer trailer;
148} __packed;
149
137struct be_mcc_mailbox { 150struct be_mcc_mailbox {
138 struct be_mcc_wrb wrb; 151 struct be_mcc_wrb wrb;
139 struct be_mcc_compl compl; 152 struct be_mcc_compl compl;
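
The new be_async_event_grp5_pvid_state is the layout a completion takes when the trailer marks a GRP5 event of type ASYNC_EVENT_PVID_STATE: the handler added earlier in be_cmds.c caches evt->tag in adapter->pvid, and the RX paths in be_main.c below then treat a frame carrying that VLAN tag as untagged unless the VLAN was explicitly added. Since tag is a 16-bit field in a __packed wire structure, and other wire fields in this file go through cpu_to_le32()/le32_to_cpu(), an explicit byte-order conversion would usually be expected when caching it. A purely hypothetical variant of the handler showing that conversion (not the patch's code):

/* Hypothetical variant of be_async_grp5_pvid_state_process(): same logic,
 * but converting the little-endian wire tag explicitly. */
static void grp5_pvid_state_process(struct be_adapter *adapter,
                struct be_async_event_grp5_pvid_state *evt)
{
        if (evt->enabled)
                adapter->pvid = le16_to_cpu(evt->tag);
        else
                adapter->pvid = 0;
}
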
@@ -156,6 +169,7 @@ struct be_mcc_mailbox {
156#define OPCODE_COMMON_SET_QOS 28 169#define OPCODE_COMMON_SET_QOS 28
157#define OPCODE_COMMON_MCC_CREATE_EXT 90 170#define OPCODE_COMMON_MCC_CREATE_EXT 90
158#define OPCODE_COMMON_SEEPROM_READ 30 171#define OPCODE_COMMON_SEEPROM_READ 30
172#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
159#define OPCODE_COMMON_NTWK_RX_FILTER 34 173#define OPCODE_COMMON_NTWK_RX_FILTER 34
160#define OPCODE_COMMON_GET_FW_VERSION 35 174#define OPCODE_COMMON_GET_FW_VERSION 35
161#define OPCODE_COMMON_SET_FLOW_CONTROL 36 175#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -176,6 +190,7 @@ struct be_mcc_mailbox {
176#define OPCODE_COMMON_GET_BEACON_STATE 70 190#define OPCODE_COMMON_GET_BEACON_STATE 70
177#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 191#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
178#define OPCODE_COMMON_GET_PHY_DETAILS 102 192#define OPCODE_COMMON_GET_PHY_DETAILS 102
193#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
179 194
180#define OPCODE_ETH_RSS_CONFIG 1 195#define OPCODE_ETH_RSS_CONFIG 1
181#define OPCODE_ETH_ACPI_CONFIG 2 196#define OPCODE_ETH_ACPI_CONFIG 2
@@ -619,7 +634,10 @@ struct be_rxf_stats {
619 u32 rx_drops_invalid_ring; /* dword 145*/ 634 u32 rx_drops_invalid_ring; /* dword 145*/
620 u32 forwarded_packets; /* dword 146*/ 635 u32 forwarded_packets; /* dword 146*/
621 u32 rx_drops_mtu; /* dword 147*/ 636 u32 rx_drops_mtu; /* dword 147*/
622 u32 rsvd0[15]; 637 u32 rsvd0[7];
638 u32 port0_jabber_events;
639 u32 port1_jabber_events;
640 u32 rsvd1[6];
623}; 641};
624 642
625struct be_erx_stats { 643struct be_erx_stats {
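
The be_rxf_stats change above carves two named counters out of the reserved area without disturbing the firmware's dword layout: the old rsvd0[15] becomes rsvd0[7], port0_jabber_events, port1_jabber_events and rsvd1[6], i.e. 7 + 1 + 1 + 6 = 15 dwords, exactly as before. If one wanted to assert that at build time, a sketch along these lines would do (BUILD_BUG_ON is the standard kernel helper; the check assumes the all-u32, padding-free layout shown above):

#include <linux/kernel.h>
#include <linux/stddef.h>
#include "be_cmds.h"            /* driver header, for struct be_rxf_stats */

static inline void check_rxf_stats_layout(void)
{
        /* rsvd0 through the end of rsvd1 must still span the original 15 dwords */
        BUILD_BUG_ON(offsetof(struct be_rxf_stats, rsvd1) +
                     sizeof(((struct be_rxf_stats *)0)->rsvd1) -
                     offsetof(struct be_rxf_stats, rsvd0) != 15 * sizeof(u32));
}
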
@@ -630,11 +648,16 @@ struct be_erx_stats {
630 u32 debug_pmem_pbuf_dealloc; /* dword 47*/ 648 u32 debug_pmem_pbuf_dealloc; /* dword 47*/
631}; 649};
632 650
651struct be_pmem_stats {
652 u32 eth_red_drops;
653 u32 rsvd[4];
654};
655
633struct be_hw_stats { 656struct be_hw_stats {
634 struct be_rxf_stats rxf; 657 struct be_rxf_stats rxf;
635 u32 rsvd[48]; 658 u32 rsvd[48];
636 struct be_erx_stats erx; 659 struct be_erx_stats erx;
637 u32 rsvd1[6]; 660 struct be_pmem_stats pmem;
638}; 661};
639 662
640struct be_cmd_req_get_stats { 663struct be_cmd_req_get_stats {
@@ -647,6 +670,20 @@ struct be_cmd_resp_get_stats {
647 struct be_hw_stats hw_stats; 670 struct be_hw_stats hw_stats;
648}; 671};
649 672
673struct be_cmd_req_get_cntl_addnl_attribs {
674 struct be_cmd_req_hdr hdr;
675 u8 rsvd[8];
676};
677
678struct be_cmd_resp_get_cntl_addnl_attribs {
679 struct be_cmd_resp_hdr hdr;
680 u16 ipl_file_number;
681 u8 ipl_file_version;
682 u8 rsvd0;
683 u8 on_die_temperature; /* in degrees centigrade*/
684 u8 rsvd1[3];
685};
686
650struct be_cmd_req_vlan_config { 687struct be_cmd_req_vlan_config {
651 struct be_cmd_req_hdr hdr; 688 struct be_cmd_req_hdr hdr;
652 u8 interface_id; 689 u8 interface_id;
@@ -994,17 +1031,29 @@ struct be_cmd_resp_set_qos {
994 u32 rsvd; 1031 u32 rsvd;
995}; 1032};
996 1033
1034/*********************** Controller Attributes ***********************/
1035struct be_cmd_req_cntl_attribs {
1036 struct be_cmd_req_hdr hdr;
1037};
1038
1039struct be_cmd_resp_cntl_attribs {
1040 struct be_cmd_resp_hdr hdr;
1041 struct mgmt_controller_attrib attribs;
1042};
1043
997extern int be_pci_fnum_get(struct be_adapter *adapter); 1044extern int be_pci_fnum_get(struct be_adapter *adapter);
998extern int be_cmd_POST(struct be_adapter *adapter); 1045extern int be_cmd_POST(struct be_adapter *adapter);
999extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1046extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1000 u8 type, bool permanent, u32 if_handle); 1047 u8 type, bool permanent, u32 if_handle);
1001extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, 1048extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1002 u32 if_id, u32 *pmac_id); 1049 u32 if_id, u32 *pmac_id, u32 domain);
1003extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); 1050extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
1051 u32 pmac_id, u32 domain);
1004extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 1052extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
1005 u32 en_flags, u8 *mac, bool pmac_invalid, 1053 u32 en_flags, u8 *mac, bool pmac_invalid,
1006 u32 *if_handle, u32 *pmac_id, u32 domain); 1054 u32 *if_handle, u32 *pmac_id, u32 domain);
1007extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); 1055extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
1056 u32 domain);
1008extern int be_cmd_eq_create(struct be_adapter *adapter, 1057extern int be_cmd_eq_create(struct be_adapter *adapter,
1009 struct be_queue_info *eq, int eq_delay); 1058 struct be_queue_info *eq, int eq_delay);
1010extern int be_cmd_cq_create(struct be_adapter *adapter, 1059extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1125,6 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter,
1076 struct be_dma_mem *cmd); 1125 struct be_dma_mem *cmd);
1077extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 1126extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1078extern void be_detect_dump_ue(struct be_adapter *adapter); 1127extern void be_detect_dump_ue(struct be_adapter *adapter);
1128extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
1129extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1079 1130
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index b4be0271efe..6e5e43380c2 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,7 +26,8 @@ struct be_ethtool_stat {
26 int offset; 26 int offset;
27}; 27};
28 28
29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT}; 29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
30 PMEMSTAT, DRVSTAT};
30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ 31#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field) 32 offsetof(_struct, field)
32#define NETSTAT_INFO(field) #field, NETSTAT,\ 33#define NETSTAT_INFO(field) #field, NETSTAT,\
@@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
43 field) 44 field)
44#define ERXSTAT_INFO(field) #field, ERXSTAT,\ 45#define ERXSTAT_INFO(field) #field, ERXSTAT,\
45 FIELDINFO(struct be_erx_stats, field) 46 FIELDINFO(struct be_erx_stats, field)
47#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\
48 FIELDINFO(struct be_pmem_stats, field)
49#define DRVSTAT_INFO(field) #field, DRVSTAT,\
50 FIELDINFO(struct be_drv_stats, \
51 field)
46 52
47static const struct be_ethtool_stat et_stats[] = { 53static const struct be_ethtool_stat et_stats[] = {
48 {NETSTAT_INFO(rx_packets)}, 54 {NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = {
99 {MISCSTAT_INFO(rx_drops_too_many_frags)}, 105 {MISCSTAT_INFO(rx_drops_too_many_frags)},
100 {MISCSTAT_INFO(rx_drops_invalid_ring)}, 106 {MISCSTAT_INFO(rx_drops_invalid_ring)},
101 {MISCSTAT_INFO(forwarded_packets)}, 107 {MISCSTAT_INFO(forwarded_packets)},
102 {MISCSTAT_INFO(rx_drops_mtu)} 108 {MISCSTAT_INFO(rx_drops_mtu)},
109 {MISCSTAT_INFO(port0_jabber_events)},
110 {MISCSTAT_INFO(port1_jabber_events)},
111 {PMEMSTAT_INFO(eth_red_drops)},
112 {DRVSTAT_INFO(be_on_die_temperature)}
103}; 113};
104#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) 114#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
105 115
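
The new PMEMSTAT and DRVSTAT entries reuse the existing et_stats machinery: each entry records a name, a source table (the enum above) and a FIELD_SIZEOF/offsetof pair, and be_get_ethtool_stats() in a later hunk of this file simply picks the base pointer for that source and adds the offset. A tiny self-contained illustration of that name/offset lookup technique, with made-up stats rather than the driver's tables:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct drv_stats { uint32_t temp; uint32_t red_drops; };

struct stat_desc { const char *name; size_t offset; };

static const struct stat_desc descs[] = {
        { "be_on_die_temperature", offsetof(struct drv_stats, temp) },
        { "eth_red_drops",         offsetof(struct drv_stats, red_drops) },
};

int main(void)
{
        struct drv_stats s = { .temp = 55, .red_drops = 3 };
        size_t i;

        for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
                printf("%s = %u\n", descs[i].name,
                       (unsigned int)*(const uint32_t *)((const uint8_t *)&s +
                                                         descs[i].offset));
        return 0;
}
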
@@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
121 "MAC Loopback test", 131 "MAC Loopback test",
122 "PHY Loopback test", 132 "PHY Loopback test",
123 "External Loopback test", 133 "External Loopback test",
124 "DDR DMA test" 134 "DDR DMA test",
125 "Link test" 135 "Link test"
126}; 136};
127 137
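
The one-character change above ("DDR DMA test" gains a trailing comma) fixes a classic initializer bug: adjacent string literals are concatenated by the C compiler, so without the comma the table held a single "DDR DMA testLink test" entry in place of two, and the self-test string count no longer matched the five results the driver reports. A short demonstration of the effect (array width mirrors ETH_GSTRING_LEN):

#include <stdio.h>

static const char buggy[][32] = { "DDR DMA test" "Link test" };   /* 1 entry  */
static const char fixed[][32] = { "DDR DMA test", "Link test" };  /* 2 entries */

int main(void)
{
        printf("buggy: %zu entries, first=\"%s\"\n",
               sizeof(buggy) / sizeof(buggy[0]), buggy[0]);
        printf("fixed: %zu entries, first=\"%s\"\n",
               sizeof(fixed) / sizeof(fixed[0]), fixed[0]);
        return 0;
}
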
@@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev,
276 case MISCSTAT: 286 case MISCSTAT:
277 p = &hw_stats->rxf; 287 p = &hw_stats->rxf;
278 break; 288 break;
289 case PMEMSTAT:
290 p = &hw_stats->pmem;
291 break;
292 case DRVSTAT:
293 p = &adapter->drv_stats;
294 break;
279 } 295 }
280 296
281 p = (u8 *)p + et_stats[i].offset; 297 p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
376 } 392 }
377 393
378 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info); 394 phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
379 phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size, 395 phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
380 &phy_cmd.dma); 396 phy_cmd.size, &phy_cmd.dma,
397 GFP_KERNEL);
381 if (!phy_cmd.va) { 398 if (!phy_cmd.va) {
382 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 399 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
383 return -ENOMEM; 400 return -ENOMEM;
@@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
416 adapter->port_type = ecmd->port; 433 adapter->port_type = ecmd->port;
417 adapter->transceiver = ecmd->transceiver; 434 adapter->transceiver = ecmd->transceiver;
418 adapter->autoneg = ecmd->autoneg; 435 adapter->autoneg = ecmd->autoneg;
419 pci_free_consistent(adapter->pdev, phy_cmd.size, 436 dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
420 phy_cmd.va, phy_cmd.dma); 437 phy_cmd.dma);
421 } else { 438 } else {
422 ecmd->speed = adapter->link_speed; 439 ecmd->speed = adapter->link_speed;
423 ecmd->port = adapter->port_type; 440 ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data)
496 int status; 513 int status;
497 u32 cur; 514 u32 cur;
498 515
499 be_cmd_get_beacon_state(adapter, adapter->port_num, &cur); 516 be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
500 517
501 if (cur == BEACON_STATE_ENABLED) 518 if (cur == BEACON_STATE_ENABLED)
502 return 0; 519 return 0;
@@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data)
504 if (data < 2) 521 if (data < 2)
505 data = 2; 522 data = 2;
506 523
507 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, 524 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
508 BEACON_STATE_ENABLED); 525 BEACON_STATE_ENABLED);
509 set_current_state(TASK_INTERRUPTIBLE); 526 set_current_state(TASK_INTERRUPTIBLE);
510 schedule_timeout(data*HZ); 527 schedule_timeout(data*HZ);
511 528
512 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, 529 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
513 BEACON_STATE_DISABLED); 530 BEACON_STATE_DISABLED);
514 531
515 return status; 532 return status;
516} 533}
517 534
535static bool
536be_is_wol_supported(struct be_adapter *adapter)
537{
538 if (!be_physfn(adapter))
539 return false;
540 else
541 return true;
542}
543
518static void 544static void
519be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 545be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
520{ 546{
521 struct be_adapter *adapter = netdev_priv(netdev); 547 struct be_adapter *adapter = netdev_priv(netdev);
522 548
523 wol->supported = WAKE_MAGIC; 549 if (be_is_wol_supported(adapter))
550 wol->supported = WAKE_MAGIC;
551
524 if (adapter->wol) 552 if (adapter->wol)
525 wol->wolopts = WAKE_MAGIC; 553 wol->wolopts = WAKE_MAGIC;
526 else 554 else
@@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
536 if (wol->wolopts & ~WAKE_MAGIC) 564 if (wol->wolopts & ~WAKE_MAGIC)
537 return -EINVAL; 565 return -EINVAL;
538 566
539 if (wol->wolopts & WAKE_MAGIC) 567 if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
540 adapter->wol = true; 568 adapter->wol = true;
541 else 569 else
542 adapter->wol = false; 570 adapter->wol = false;
@@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
554 }; 582 };
555 583
556 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 584 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
557 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 585 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
558 &ddrdma_cmd.dma); 586 &ddrdma_cmd.dma, GFP_KERNEL);
559 if (!ddrdma_cmd.va) { 587 if (!ddrdma_cmd.va) {
560 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 588 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
561 return -ENOMEM; 589 return -ENOMEM;
@@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter)
569 } 597 }
570 598
571err: 599err:
572 pci_free_consistent(adapter->pdev, ddrdma_cmd.size, 600 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
573 ddrdma_cmd.va, ddrdma_cmd.dma); 601 ddrdma_cmd.dma);
574 return ret; 602 return ret;
575} 603}
576 604
577static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, 605static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
578 u64 *status) 606 u64 *status)
579{ 607{
580 be_cmd_set_loopback(adapter, adapter->port_num, 608 be_cmd_set_loopback(adapter, adapter->hba_port_num,
581 loopback_type, 1); 609 loopback_type, 1);
582 *status = be_cmd_loopback_test(adapter, adapter->port_num, 610 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
583 loopback_type, 1500, 611 loopback_type, 1500,
584 2, 0xabc); 612 2, 0xabc);
585 be_cmd_set_loopback(adapter, adapter->port_num, 613 be_cmd_set_loopback(adapter, adapter->hba_port_num,
586 BE_NO_LOOPBACK, 1); 614 BE_NO_LOOPBACK, 1);
587 return *status; 615 return *status;
588} 616}
@@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
621 &qos_link_speed) != 0) { 649 &qos_link_speed) != 0) {
622 test->flags |= ETH_TEST_FL_FAILED; 650 test->flags |= ETH_TEST_FL_FAILED;
623 data[4] = -1; 651 data[4] = -1;
624 } else if (mac_speed) { 652 } else if (!mac_speed) {
653 test->flags |= ETH_TEST_FL_FAILED;
625 data[4] = 1; 654 data[4] = 1;
626 } 655 }
627} 656}
@@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
662 691
663 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 692 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
664 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 693 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
665 eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size, 694 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
666 &eeprom_cmd.dma); 695 &eeprom_cmd.dma, GFP_KERNEL);
667 696
668 if (!eeprom_cmd.va) { 697 if (!eeprom_cmd.va) {
669 dev_err(&adapter->pdev->dev, 698 dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
677 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va; 706 resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
678 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); 707 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
679 } 708 }
680 pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va, 709 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
681 eeprom_cmd.dma); 710 eeprom_cmd.dma);
682 711
683 return status; 712 return status;
684} 713}
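
The be_ethtool.c hunks above, like the be_main.c and bnad.c hunks below, move from the legacy PCI DMA wrappers to the generic DMA API: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() on &pdev->dev with an explicit GFP flag, pci_map_single()/pci_map_page()/pci_unmap_*() become dma_map_single()/dma_map_page()/dma_unmap_*() with DMA_TO_DEVICE or DMA_FROM_DEVICE, pci_dma_mapping_error() becomes dma_mapping_error(), and pci_set_dma_mask() becomes dma_set_mask(). A condensed sketch of the conversion for one command buffer (error handling trimmed; not the driver's exact code):

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

static int alloc_cmd_buf(struct pci_dev *pdev, size_t size)
{
        dma_addr_t dma;
        void *va;

        /* old: va = pci_alloc_consistent(pdev, size, &dma); */
        va = dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL);
        if (!va)
                return -ENOMEM;

        /* ... fill the buffer and issue the command ... */

        /* old: pci_free_consistent(pdev, size, va, dma); */
        dma_free_coherent(&pdev->dev, size, va, dma);
        return 0;
}
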
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 4096d977823..3f459f76cd1 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -327,6 +327,53 @@ struct be_eth_rx_compl {
327 u32 dw[4]; 327 u32 dw[4];
328}; 328};
329 329
330struct mgmt_hba_attribs {
331 u8 flashrom_version_string[32];
332 u8 manufacturer_name[32];
333 u32 supported_modes;
334 u32 rsvd0[3];
335 u8 ncsi_ver_string[12];
336 u32 default_extended_timeout;
337 u8 controller_model_number[32];
338 u8 controller_description[64];
339 u8 controller_serial_number[32];
340 u8 ip_version_string[32];
341 u8 firmware_version_string[32];
342 u8 bios_version_string[32];
343 u8 redboot_version_string[32];
344 u8 driver_version_string[32];
345 u8 fw_on_flash_version_string[32];
346 u32 functionalities_supported;
347 u16 max_cdblength;
348 u8 asic_revision;
349 u8 generational_guid[16];
350 u8 hba_port_count;
351 u16 default_link_down_timeout;
352 u8 iscsi_ver_min_max;
353 u8 multifunction_device;
354 u8 cache_valid;
355 u8 hba_status;
356 u8 max_domains_supported;
357 u8 phy_port;
358 u32 firmware_post_status;
359 u32 hba_mtu[8];
360 u32 rsvd1[4];
361};
362
363struct mgmt_controller_attrib {
364 struct mgmt_hba_attribs hba_attribs;
365 u16 pci_vendor_id;
366 u16 pci_device_id;
367 u16 pci_sub_vendor_id;
368 u16 pci_sub_system_id;
369 u8 pci_bus_number;
370 u8 pci_device_number;
371 u8 pci_function_number;
372 u8 interface_type;
373 u64 unique_identifier;
374 u32 rsvd0[5];
375};
376
330struct controller_id { 377struct controller_id {
331 u32 vendor; 378 u32 vendor;
332 u32 device; 379 u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 28a32a6c8bf..ef66dc61e6e 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125{ 125{
126 struct be_dma_mem *mem = &q->dma_mem; 126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va) 127 if (mem->va)
128 pci_free_consistent(adapter->pdev, mem->size, 128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->va, mem->dma); 129 mem->dma);
130} 130}
131 131
132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, 132static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 q->len = len; 138 q->len = len;
139 q->entry_size = entry_size; 139 q->entry_size = entry_size;
140 mem->size = len * entry_size; 140 mem->size = len * entry_size;
141 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma); 141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
142 if (!mem->va) 143 if (!mem->va)
143 return -1; 144 return -1;
144 memset(mem->va, 0, mem->size); 145 memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
235 if (!be_physfn(adapter)) 236 if (!be_physfn(adapter))
236 goto netdev_addr; 237 goto netdev_addr;
237 238
238 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); 239 status = be_cmd_pmac_del(adapter, adapter->if_handle,
240 adapter->pmac_id, 0);
239 if (status) 241 if (status)
240 return status; 242 return status;
241 243
242 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, 244 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
243 adapter->if_handle, &adapter->pmac_id); 245 adapter->if_handle, &adapter->pmac_id, 0);
244netdev_addr: 246netdev_addr:
245 if (!status) 247 if (!status)
246 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 248 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -484,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
484 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); 486 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
485} 487}
486 488
487static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, 489static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
488 bool unmap_single) 490 bool unmap_single)
489{ 491{
490 dma_addr_t dma; 492 dma_addr_t dma;
@@ -494,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
494 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; 496 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
495 if (wrb->frag_len) { 497 if (wrb->frag_len) {
496 if (unmap_single) 498 if (unmap_single)
497 pci_unmap_single(pdev, dma, wrb->frag_len, 499 dma_unmap_single(dev, dma, wrb->frag_len,
498 PCI_DMA_TODEVICE); 500 DMA_TO_DEVICE);
499 else 501 else
500 pci_unmap_page(pdev, dma, wrb->frag_len, 502 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
501 PCI_DMA_TODEVICE);
502 } 503 }
503} 504}
504 505
@@ -507,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
507{ 508{
508 dma_addr_t busaddr; 509 dma_addr_t busaddr;
509 int i, copied = 0; 510 int i, copied = 0;
510 struct pci_dev *pdev = adapter->pdev; 511 struct device *dev = &adapter->pdev->dev;
511 struct sk_buff *first_skb = skb; 512 struct sk_buff *first_skb = skb;
512 struct be_queue_info *txq = &adapter->tx_obj.q; 513 struct be_queue_info *txq = &adapter->tx_obj.q;
513 struct be_eth_wrb *wrb; 514 struct be_eth_wrb *wrb;
@@ -521,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
521 522
522 if (skb->len > skb->data_len) { 523 if (skb->len > skb->data_len) {
523 int len = skb_headlen(skb); 524 int len = skb_headlen(skb);
524 busaddr = pci_map_single(pdev, skb->data, len, 525 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
525 PCI_DMA_TODEVICE); 526 if (dma_mapping_error(dev, busaddr))
526 if (pci_dma_mapping_error(pdev, busaddr))
527 goto dma_err; 527 goto dma_err;
528 map_single = true; 528 map_single = true;
529 wrb = queue_head_node(txq); 529 wrb = queue_head_node(txq);
@@ -536,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 536 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
537 struct skb_frag_struct *frag = 537 struct skb_frag_struct *frag =
538 &skb_shinfo(skb)->frags[i]; 538 &skb_shinfo(skb)->frags[i];
539 busaddr = pci_map_page(pdev, frag->page, 539 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
540 frag->page_offset, 540 frag->size, DMA_TO_DEVICE);
541 frag->size, PCI_DMA_TODEVICE); 541 if (dma_mapping_error(dev, busaddr))
542 if (pci_dma_mapping_error(pdev, busaddr))
543 goto dma_err; 542 goto dma_err;
544 wrb = queue_head_node(txq); 543 wrb = queue_head_node(txq);
545 wrb_fill(wrb, busaddr, frag->size); 544 wrb_fill(wrb, busaddr, frag->size);
@@ -563,7 +562,7 @@ dma_err:
563 txq->head = map_head; 562 txq->head = map_head;
564 while (copied) { 563 while (copied) {
565 wrb = queue_head_node(txq); 564 wrb = queue_head_node(txq);
566 unmap_tx_frag(pdev, wrb, map_single); 565 unmap_tx_frag(dev, wrb, map_single);
567 map_single = false; 566 map_single = false;
568 copied -= wrb->frag_len; 567 copied -= wrb->frag_len;
569 queue_head_inc(txq); 568 queue_head_inc(txq);
@@ -743,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
743 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) 742 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
744 status = be_cmd_pmac_del(adapter, 743 status = be_cmd_pmac_del(adapter,
745 adapter->vf_cfg[vf].vf_if_handle, 744 adapter->vf_cfg[vf].vf_if_handle,
746 adapter->vf_cfg[vf].vf_pmac_id); 745 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
747 746
748 status = be_cmd_pmac_add(adapter, mac, 747 status = be_cmd_pmac_add(adapter, mac,
749 adapter->vf_cfg[vf].vf_if_handle, 748 adapter->vf_cfg[vf].vf_if_handle,
750 &adapter->vf_cfg[vf].vf_pmac_id); 749 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
751 750
752 if (status) 751 if (status)
753 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", 752 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -822,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
822 rate = 10000; 821 rate = 10000;
823 822
824 adapter->vf_cfg[vf].vf_tx_rate = rate; 823 adapter->vf_cfg[vf].vf_tx_rate = rate;
825 status = be_cmd_set_qos(adapter, rate / 10, vf); 824 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
826 825
827 if (status) 826 if (status)
828 dev_info(&adapter->pdev->dev, 827 dev_info(&adapter->pdev->dev,
@@ -888,8 +887,9 @@ get_rx_page_info(struct be_adapter *adapter,
888 BUG_ON(!rx_page_info->page); 887 BUG_ON(!rx_page_info->page);
889 888
890 if (rx_page_info->last_page_user) { 889 if (rx_page_info->last_page_user) {
891 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus), 890 dma_unmap_page(&adapter->pdev->dev,
892 adapter->big_page_size, PCI_DMA_FROMDEVICE); 891 dma_unmap_addr(rx_page_info, bus),
892 adapter->big_page_size, DMA_FROM_DEVICE);
893 rx_page_info->last_page_user = false; 893 rx_page_info->last_page_user = false;
894 } 894 }
895 895
@@ -1047,6 +1047,9 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1047 if ((adapter->function_mode & 0x400) && !vtm) 1047 if ((adapter->function_mode & 0x400) && !vtm)
1048 vlanf = 0; 1048 vlanf = 0;
1049 1049
1050 if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
1051 vlanf = 0;
1052
1050 if (unlikely(vlanf)) { 1053 if (unlikely(vlanf)) {
1051 if (!adapter->vlan_grp || adapter->vlans_added == 0) { 1054 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1052 kfree_skb(skb); 1055 kfree_skb(skb);
@@ -1087,6 +1090,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1087 if ((adapter->function_mode & 0x400) && !vtm) 1090 if ((adapter->function_mode & 0x400) && !vtm)
1088 vlanf = 0; 1091 vlanf = 0;
1089 1092
1093 if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
1094 vlanf = 0;
1095
1090 skb = napi_get_frags(&eq_obj->napi); 1096 skb = napi_get_frags(&eq_obj->napi);
1091 if (!skb) { 1097 if (!skb) {
1092 be_rx_compl_discard(adapter, rxo, rxcp); 1098 be_rx_compl_discard(adapter, rxo, rxcp);
@@ -1163,20 +1169,20 @@ static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1163 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; 1169 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1164} 1170}
1165 1171
1166static inline struct page *be_alloc_pages(u32 size) 1172static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1167{ 1173{
1168 gfp_t alloc_flags = GFP_ATOMIC;
1169 u32 order = get_order(size); 1174 u32 order = get_order(size);
1175
1170 if (order > 0) 1176 if (order > 0)
1171 alloc_flags |= __GFP_COMP; 1177 gfp |= __GFP_COMP;
1172 return alloc_pages(alloc_flags, order); 1178 return alloc_pages(gfp, order);
1173} 1179}
1174 1180
1175/* 1181/*
1176 * Allocate a page, split it to fragments of size rx_frag_size and post as 1182 * Allocate a page, split it to fragments of size rx_frag_size and post as
1177 * receive buffers to BE 1183 * receive buffers to BE
1178 */ 1184 */
1179static void be_post_rx_frags(struct be_rx_obj *rxo) 1185static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1180{ 1186{
1181 struct be_adapter *adapter = rxo->adapter; 1187 struct be_adapter *adapter = rxo->adapter;
1182 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; 1188 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
@@ -1190,14 +1196,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
1190 page_info = &rxo->page_info_tbl[rxq->head]; 1196 page_info = &rxo->page_info_tbl[rxq->head];
1191 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { 1197 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1192 if (!pagep) { 1198 if (!pagep) {
1193 pagep = be_alloc_pages(adapter->big_page_size); 1199 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1194 if (unlikely(!pagep)) { 1200 if (unlikely(!pagep)) {
1195 rxo->stats.rx_post_fail++; 1201 rxo->stats.rx_post_fail++;
1196 break; 1202 break;
1197 } 1203 }
1198 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, 1204 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1199 adapter->big_page_size, 1205 0, adapter->big_page_size,
1200 PCI_DMA_FROMDEVICE); 1206 DMA_FROM_DEVICE);
1201 page_info->page_offset = 0; 1207 page_info->page_offset = 0;
1202 } else { 1208 } else {
1203 get_page(pagep); 1209 get_page(pagep);
@@ -1270,8 +1276,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1270 do { 1276 do {
1271 cur_index = txq->tail; 1277 cur_index = txq->tail;
1272 wrb = queue_tail_node(txq); 1278 wrb = queue_tail_node(txq);
1273 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr && 1279 unmap_tx_frag(&adapter->pdev->dev, wrb,
1274 skb_headlen(sent_skb))); 1280 (unmap_skb_hdr && skb_headlen(sent_skb)));
1275 unmap_skb_hdr = false; 1281 unmap_skb_hdr = false;
1276 1282
1277 num_wrbs++; 1283 num_wrbs++;
@@ -1747,7 +1753,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1747 1753
1748 /* Refill the queue */ 1754 /* Refill the queue */
1749 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) 1755 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1750 be_post_rx_frags(rxo); 1756 be_post_rx_frags(rxo, GFP_ATOMIC);
1751 1757
1752 /* All consumed */ 1758 /* All consumed */
1753 if (work_done < budget) { 1759 if (work_done < budget) {
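
be_post_rx_frags() now takes its allocation context from the caller instead of hard-coding GFP_ATOMIC: the NAPI refill above runs in softirq context and must not sleep, so it passes GFP_ATOMIC, while be_open() and the rx-post-starved path in be_worker() run in process context and can use GFP_KERNEL. A minimal sketch of threading a gfp_t parameter through a page allocator, mirroring the be_alloc_pages() change but not copied from the driver:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: let the caller decide whether sleeping is allowed. */
static struct page *alloc_rx_pages(unsigned int size, gfp_t gfp)
{
        unsigned int order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;      /* compound page for multi-order allocations */
        return alloc_pages(gfp, order);
}

/* softirq/NAPI context:    alloc_rx_pages(size, GFP_ATOMIC);  */
/* process context (open):  alloc_rx_pages(size, GFP_KERNEL);  */
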
@@ -1827,6 +1833,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
1827 1833
1828 if (ue_status_lo || ue_status_hi) { 1834 if (ue_status_lo || ue_status_hi) {
1829 adapter->ue_detected = true; 1835 adapter->ue_detected = true;
1836 adapter->eeh_err = true;
1830 dev_err(&adapter->pdev->dev, "UE Detected!!\n"); 1837 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1831 } 1838 }
1832 1839
@@ -1865,10 +1872,14 @@ static void be_worker(struct work_struct *work)
1865 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 1872 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1866 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); 1873 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1867 } 1874 }
1875
1876 if (!adapter->ue_detected && !lancer_chip(adapter))
1877 be_detect_dump_ue(adapter);
1878
1868 goto reschedule; 1879 goto reschedule;
1869 } 1880 }
1870 1881
1871 if (!adapter->stats_ioctl_sent) 1882 if (!adapter->stats_cmd_sent)
1872 be_cmd_get_stats(adapter, &adapter->stats_cmd); 1883 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1873 1884
1874 be_tx_rate_update(adapter); 1885 be_tx_rate_update(adapter);
@@ -1879,7 +1890,7 @@ static void be_worker(struct work_struct *work)
1879 1890
1880 if (rxo->rx_post_starved) { 1891 if (rxo->rx_post_starved) {
1881 rxo->rx_post_starved = false; 1892 rxo->rx_post_starved = false;
1882 be_post_rx_frags(rxo); 1893 be_post_rx_frags(rxo, GFP_KERNEL);
1883 } 1894 }
1884 } 1895 }
1885 if (!adapter->ue_detected && !lancer_chip(adapter)) 1896 if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2127,7 +2138,7 @@ static int be_open(struct net_device *netdev)
2127 u16 link_speed; 2138 u16 link_speed;
2128 2139
2129 for_all_rx_queues(adapter, rxo, i) { 2140 for_all_rx_queues(adapter, rxo, i) {
2130 be_post_rx_frags(rxo); 2141 be_post_rx_frags(rxo, GFP_KERNEL);
2131 napi_enable(&rxo->rx_eq.napi); 2142 napi_enable(&rxo->rx_eq.napi);
2132 } 2143 }
2133 napi_enable(&tx_eq->napi); 2144 napi_enable(&tx_eq->napi);
@@ -2179,7 +2190,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2179 memset(mac, 0, ETH_ALEN); 2190 memset(mac, 0, ETH_ALEN);
2180 2191
2181 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 2192 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2182 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); 2193 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2194 GFP_KERNEL);
2183 if (cmd.va == NULL) 2195 if (cmd.va == NULL)
2184 return -1; 2196 return -1;
2185 memset(cmd.va, 0, cmd.size); 2197 memset(cmd.va, 0, cmd.size);
@@ -2190,8 +2202,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2190 if (status) { 2202 if (status) {
2191 dev_err(&adapter->pdev->dev, 2203 dev_err(&adapter->pdev->dev,
2192 "Could not enable Wake-on-lan\n"); 2204 "Could not enable Wake-on-lan\n");
2193 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, 2205 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2194 cmd.dma); 2206 cmd.dma);
2195 return status; 2207 return status;
2196 } 2208 }
2197 status = be_cmd_enable_magic_wol(adapter, 2209 status = be_cmd_enable_magic_wol(adapter,
@@ -2204,7 +2216,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2204 pci_enable_wake(adapter->pdev, PCI_D3cold, 0); 2216 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2205 } 2217 }
2206 2218
2207 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); 2219 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2208 return status; 2220 return status;
2209} 2221}
2210 2222
@@ -2225,7 +2237,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2225 for (vf = 0; vf < num_vfs; vf++) { 2237 for (vf = 0; vf < num_vfs; vf++) {
2226 status = be_cmd_pmac_add(adapter, mac, 2238 status = be_cmd_pmac_add(adapter, mac,
2227 adapter->vf_cfg[vf].vf_if_handle, 2239 adapter->vf_cfg[vf].vf_if_handle,
2228 &adapter->vf_cfg[vf].vf_pmac_id); 2240 &adapter->vf_cfg[vf].vf_pmac_id,
2241 vf + 1);
2229 if (status) 2242 if (status)
2230 dev_err(&adapter->pdev->dev, 2243 dev_err(&adapter->pdev->dev,
2231 "Mac address add failed for VF %d\n", vf); 2244 "Mac address add failed for VF %d\n", vf);
@@ -2245,7 +2258,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2245 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) 2258 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2246 be_cmd_pmac_del(adapter, 2259 be_cmd_pmac_del(adapter,
2247 adapter->vf_cfg[vf].vf_if_handle, 2260 adapter->vf_cfg[vf].vf_if_handle,
2248 adapter->vf_cfg[vf].vf_pmac_id); 2261 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2249 } 2262 }
2250} 2263}
2251 2264
@@ -2277,22 +2290,26 @@ static int be_setup(struct be_adapter *adapter)
2277 goto do_none; 2290 goto do_none;
2278 2291
2279 if (be_physfn(adapter)) { 2292 if (be_physfn(adapter)) {
2280 while (vf < num_vfs) { 2293 if (adapter->sriov_enabled) {
2281 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED 2294 while (vf < num_vfs) {
2282 | BE_IF_FLAGS_BROADCAST; 2295 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2283 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2296 BE_IF_FLAGS_BROADCAST;
2284 mac, true, 2297 status = be_cmd_if_create(adapter, cap_flags,
2298 en_flags, mac, true,
2285 &adapter->vf_cfg[vf].vf_if_handle, 2299 &adapter->vf_cfg[vf].vf_if_handle,
2286 NULL, vf+1); 2300 NULL, vf+1);
2287 if (status) { 2301 if (status) {
2288 dev_err(&adapter->pdev->dev, 2302 dev_err(&adapter->pdev->dev,
2289 "Interface Create failed for VF %d\n", vf); 2303 "Interface Create failed for VF %d\n",
2290 goto if_destroy; 2304 vf);
2305 goto if_destroy;
2306 }
2307 adapter->vf_cfg[vf].vf_pmac_id =
2308 BE_INVALID_PMAC_ID;
2309 vf++;
2291 } 2310 }
2292 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2293 vf++;
2294 } 2311 }
2295 } else if (!be_physfn(adapter)) { 2312 } else {
2296 status = be_cmd_mac_addr_query(adapter, mac, 2313 status = be_cmd_mac_addr_query(adapter, mac,
2297 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); 2314 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2298 if (!status) { 2315 if (!status) {
@@ -2313,44 +2330,46 @@ static int be_setup(struct be_adapter *adapter)
2313 if (status != 0) 2330 if (status != 0)
2314 goto rx_qs_destroy; 2331 goto rx_qs_destroy;
2315 2332
2316 if (be_physfn(adapter)) {
2317 status = be_vf_eth_addr_config(adapter);
2318 if (status)
2319 goto mcc_q_destroy;
2320 }
2321
2322 adapter->link_speed = -1; 2333 adapter->link_speed = -1;
2323 2334
2324 return 0; 2335 return 0;
2325 2336
2326mcc_q_destroy:
2327 if (be_physfn(adapter))
2328 be_vf_eth_addr_rem(adapter);
2329 be_mcc_queues_destroy(adapter); 2337 be_mcc_queues_destroy(adapter);
2330rx_qs_destroy: 2338rx_qs_destroy:
2331 be_rx_queues_destroy(adapter); 2339 be_rx_queues_destroy(adapter);
2332tx_qs_destroy: 2340tx_qs_destroy:
2333 be_tx_queues_destroy(adapter); 2341 be_tx_queues_destroy(adapter);
2334if_destroy: 2342if_destroy:
2335 for (vf = 0; vf < num_vfs; vf++) 2343 if (be_physfn(adapter) && adapter->sriov_enabled)
2336 if (adapter->vf_cfg[vf].vf_if_handle) 2344 for (vf = 0; vf < num_vfs; vf++)
2337 be_cmd_if_destroy(adapter, 2345 if (adapter->vf_cfg[vf].vf_if_handle)
2338 adapter->vf_cfg[vf].vf_if_handle); 2346 be_cmd_if_destroy(adapter,
2339 be_cmd_if_destroy(adapter, adapter->if_handle); 2347 adapter->vf_cfg[vf].vf_if_handle,
2348 vf + 1);
2349 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2340do_none: 2350do_none:
2341 return status; 2351 return status;
2342} 2352}
2343 2353
2344static int be_clear(struct be_adapter *adapter) 2354static int be_clear(struct be_adapter *adapter)
2345{ 2355{
2346 if (be_physfn(adapter)) 2356 int vf;
2357
2358 if (be_physfn(adapter) && adapter->sriov_enabled)
2347 be_vf_eth_addr_rem(adapter); 2359 be_vf_eth_addr_rem(adapter);
2348 2360
2349 be_mcc_queues_destroy(adapter); 2361 be_mcc_queues_destroy(adapter);
2350 be_rx_queues_destroy(adapter); 2362 be_rx_queues_destroy(adapter);
2351 be_tx_queues_destroy(adapter); 2363 be_tx_queues_destroy(adapter);
2352 2364
2353 be_cmd_if_destroy(adapter, adapter->if_handle); 2365 if (be_physfn(adapter) && adapter->sriov_enabled)
2366 for (vf = 0; vf < num_vfs; vf++)
2367 if (adapter->vf_cfg[vf].vf_if_handle)
2368 be_cmd_if_destroy(adapter,
2369 adapter->vf_cfg[vf].vf_if_handle,
2370 vf + 1);
2371
2372 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2354 2373
2355 /* tell fw we're done with firing cmds */ 2374 /* tell fw we're done with firing cmds */
2356 be_cmd_fw_clean(adapter); 2375 be_cmd_fw_clean(adapter);
@@ -2453,8 +2472,8 @@ static int be_flash_data(struct be_adapter *adapter,
2453 continue; 2472 continue;
2454 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && 2473 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2455 (!be_flash_redboot(adapter, fw->data, 2474 (!be_flash_redboot(adapter, fw->data,
2456 pflashcomp[i].offset, pflashcomp[i].size, 2475 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2457 filehdr_size))) 2476 (num_of_images * sizeof(struct image_hdr)))))
2458 continue; 2477 continue;
2459 p = fw->data; 2478 p = fw->data;
2460 p += filehdr_size + pflashcomp[i].offset 2479 p += filehdr_size + pflashcomp[i].offset
@@ -2528,8 +2547,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2528 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); 2547 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2529 2548
2530 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; 2549 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2531 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, 2550 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2532 &flash_cmd.dma); 2551 &flash_cmd.dma, GFP_KERNEL);
2533 if (!flash_cmd.va) { 2552 if (!flash_cmd.va) {
2534 status = -ENOMEM; 2553 status = -ENOMEM;
2535 dev_err(&adapter->pdev->dev, 2554 dev_err(&adapter->pdev->dev,
@@ -2558,8 +2577,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
2558 status = -1; 2577 status = -1;
2559 } 2578 }
2560 2579
2561 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, 2580 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2562 flash_cmd.dma); 2581 flash_cmd.dma);
2563 if (status) { 2582 if (status) {
2564 dev_err(&adapter->pdev->dev, "Firmware load error\n"); 2583 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2565 goto fw_exit; 2584 goto fw_exit;
@@ -2700,13 +2719,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
2700 be_unmap_pci_bars(adapter); 2719 be_unmap_pci_bars(adapter);
2701 2720
2702 if (mem->va) 2721 if (mem->va)
2703 pci_free_consistent(adapter->pdev, mem->size, 2722 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2704 mem->va, mem->dma); 2723 mem->dma);
2705 2724
2706 mem = &adapter->mc_cmd_mem; 2725 mem = &adapter->mc_cmd_mem;
2707 if (mem->va) 2726 if (mem->va)
2708 pci_free_consistent(adapter->pdev, mem->size, 2727 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2709 mem->va, mem->dma); 2728 mem->dma);
2710} 2729}
2711 2730
2712static int be_ctrl_init(struct be_adapter *adapter) 2731static int be_ctrl_init(struct be_adapter *adapter)
@@ -2721,8 +2740,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
2721 goto done; 2740 goto done;
2722 2741
2723 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 2742 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2724 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, 2743 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2725 mbox_mem_alloc->size, &mbox_mem_alloc->dma); 2744 mbox_mem_alloc->size,
2745 &mbox_mem_alloc->dma,
2746 GFP_KERNEL);
2726 if (!mbox_mem_alloc->va) { 2747 if (!mbox_mem_alloc->va) {
2727 status = -ENOMEM; 2748 status = -ENOMEM;
2728 goto unmap_pci_bars; 2749 goto unmap_pci_bars;
@@ -2734,8 +2755,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
2734 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 2755 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2735 2756
2736 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config); 2757 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2737 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size, 2758 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2738 &mc_cmd_mem->dma); 2759 mc_cmd_mem->size, &mc_cmd_mem->dma,
2760 GFP_KERNEL);
2739 if (mc_cmd_mem->va == NULL) { 2761 if (mc_cmd_mem->va == NULL) {
2740 status = -ENOMEM; 2762 status = -ENOMEM;
2741 goto free_mbox; 2763 goto free_mbox;
@@ -2751,8 +2773,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
2751 return 0; 2773 return 0;
2752 2774
2753free_mbox: 2775free_mbox:
2754 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size, 2776 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2755 mbox_mem_alloc->va, mbox_mem_alloc->dma); 2777 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2756 2778
2757unmap_pci_bars: 2779unmap_pci_bars:
2758 be_unmap_pci_bars(adapter); 2780 be_unmap_pci_bars(adapter);
@@ -2766,8 +2788,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
2766 struct be_dma_mem *cmd = &adapter->stats_cmd; 2788 struct be_dma_mem *cmd = &adapter->stats_cmd;
2767 2789
2768 if (cmd->va) 2790 if (cmd->va)
2769 pci_free_consistent(adapter->pdev, cmd->size, 2791 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2770 cmd->va, cmd->dma); 2792 cmd->va, cmd->dma);
2771} 2793}
2772 2794
2773static int be_stats_init(struct be_adapter *adapter) 2795static int be_stats_init(struct be_adapter *adapter)
@@ -2775,7 +2797,8 @@ static int be_stats_init(struct be_adapter *adapter)
2775 struct be_dma_mem *cmd = &adapter->stats_cmd; 2797 struct be_dma_mem *cmd = &adapter->stats_cmd;
2776 2798
2777 cmd->size = sizeof(struct be_cmd_req_get_stats); 2799 cmd->size = sizeof(struct be_cmd_req_get_stats);
2778 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2800 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2801 GFP_KERNEL);
2779 if (cmd->va == NULL) 2802 if (cmd->va == NULL)
2780 return -1; 2803 return -1;
2781 memset(cmd->va, 0, cmd->size); 2804 memset(cmd->va, 0, cmd->size);
@@ -2845,6 +2868,10 @@ static int be_get_config(struct be_adapter *adapter)
2845 else 2868 else
2846 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; 2869 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2847 2870
2871 status = be_cmd_get_cntl_attributes(adapter);
2872 if (status)
2873 return status;
2874
2848 return 0; 2875 return 0;
2849} 2876}
2850 2877
@@ -2918,11 +2945,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
2918 adapter->netdev = netdev; 2945 adapter->netdev = netdev;
2919 SET_NETDEV_DEV(netdev, &pdev->dev); 2946 SET_NETDEV_DEV(netdev, &pdev->dev);
2920 2947
2921 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2948 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2922 if (!status) { 2949 if (!status) {
2923 netdev->features |= NETIF_F_HIGHDMA; 2950 netdev->features |= NETIF_F_HIGHDMA;
2924 } else { 2951 } else {
2925 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2952 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2926 if (status) { 2953 if (status) {
2927 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); 2954 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2928 goto free_netdev; 2955 goto free_netdev;
@@ -2947,11 +2974,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
2947 if (status) 2974 if (status)
2948 goto ctrl_clean; 2975 goto ctrl_clean;
2949 2976
2950 if (be_physfn(adapter)) { 2977 status = be_cmd_reset_function(adapter);
2951 status = be_cmd_reset_function(adapter); 2978 if (status)
2952 if (status) 2979 goto ctrl_clean;
2953 goto ctrl_clean;
2954 }
2955 2980
2956 status = be_stats_init(adapter); 2981 status = be_stats_init(adapter);
2957 if (status) 2982 if (status)
@@ -2975,10 +3000,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
2975 goto unsetup; 3000 goto unsetup;
2976 netif_carrier_off(netdev); 3001 netif_carrier_off(netdev);
2977 3002
3003 if (be_physfn(adapter) && adapter->sriov_enabled) {
3004 status = be_vf_eth_addr_config(adapter);
3005 if (status)
3006 goto unreg_netdev;
3007 }
3008
2978 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); 3009 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2979 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); 3010 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2980 return 0; 3011 return 0;
2981 3012
3013unreg_netdev:
3014 unregister_netdev(netdev);
2982unsetup: 3015unsetup:
2983 be_clear(adapter); 3016 be_clear(adapter);
2984msix_disable: 3017msix_disable:
@@ -3005,6 +3038,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3005 struct be_adapter *adapter = pci_get_drvdata(pdev); 3038 struct be_adapter *adapter = pci_get_drvdata(pdev);
3006 struct net_device *netdev = adapter->netdev; 3039 struct net_device *netdev = adapter->netdev;
3007 3040
3041 cancel_delayed_work_sync(&adapter->work);
3008 if (adapter->wol) 3042 if (adapter->wol)
3009 be_setup_wol(adapter, true); 3043 be_setup_wol(adapter, true);
3010 3044
@@ -3017,6 +3051,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3017 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc); 3051 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3018 be_clear(adapter); 3052 be_clear(adapter);
3019 3053
3054 be_msix_disable(adapter);
3020 pci_save_state(pdev); 3055 pci_save_state(pdev);
3021 pci_disable_device(pdev); 3056 pci_disable_device(pdev);
3022 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3057 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3038,6 +3073,7 @@ static int be_resume(struct pci_dev *pdev)
3038 pci_set_power_state(pdev, 0); 3073 pci_set_power_state(pdev, 0);
3039 pci_restore_state(pdev); 3074 pci_restore_state(pdev);
3040 3075
3076 be_msix_enable(adapter);
3041 /* tell fw we're ready to fire cmds */ 3077 /* tell fw we're ready to fire cmds */
3042 status = be_cmd_fw_init(adapter); 3078 status = be_cmd_fw_init(adapter);
3043 if (status) 3079 if (status)
@@ -3053,6 +3089,8 @@ static int be_resume(struct pci_dev *pdev)
3053 3089
3054 if (adapter->wol) 3090 if (adapter->wol)
3055 be_setup_wol(adapter, false); 3091 be_setup_wol(adapter, false);
3092
3093 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3056 return 0; 3094 return 0;
3057} 3095}
3058 3096
@@ -3064,6 +3102,9 @@ static void be_shutdown(struct pci_dev *pdev)
3064 struct be_adapter *adapter = pci_get_drvdata(pdev); 3102 struct be_adapter *adapter = pci_get_drvdata(pdev);
3065 struct net_device *netdev = adapter->netdev; 3103 struct net_device *netdev = adapter->netdev;
3066 3104
3105 if (netif_running(netdev))
3106 cancel_delayed_work_sync(&adapter->work);
3107
3067 netif_device_detach(netdev); 3108 netif_device_detach(netdev);
3068 3109
3069 be_cmd_reset_function(adapter); 3110 be_cmd_reset_function(adapter);
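
The suspend, resume and shutdown changes make the periodic worker and MSI-X state symmetric across power transitions: be_suspend() and be_shutdown() now cancel the delayed work (and suspend also disables MSI-X) before tearing the adapter down, and be_resume() re-enables MSI-X and reschedules the worker once the hardware is back up. A bare sketch of that pairing; only cancel_delayed_work_sync(), schedule_delayed_work() and msecs_to_jiffies() are standard kernel APIs, the adapter type is a hypothetical stand-in:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_adapter { struct delayed_work work; };  /* hypothetical stand-in */

static void my_suspend(struct my_adapter *adapter)
{
        /* stop background work before freeing the resources it touches */
        cancel_delayed_work_sync(&adapter->work);
        /* ... teardown, disable MSI-X, pci_disable_device() ... */
}

static void my_resume(struct my_adapter *adapter)
{
        /* ... pci_enable_device(), re-init, setup ... */
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
}
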
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad912656fe..9f356d5d0f3 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
126 } 126 }
127 unmap_array[unmap_cons].skb = NULL; 127 unmap_array[unmap_cons].skb = NULL;
128 128
129 pci_unmap_single(bnad->pcidev, 129 dma_unmap_single(&bnad->pcidev->dev,
130 pci_unmap_addr(&unmap_array[unmap_cons], 130 dma_unmap_addr(&unmap_array[unmap_cons],
131 dma_addr), skb_headlen(skb), 131 dma_addr), skb_headlen(skb),
132 PCI_DMA_TODEVICE); 132 DMA_TO_DEVICE);
133 133
134 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 134 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
135 if (++unmap_cons >= unmap_q->q_depth) 135 if (++unmap_cons >= unmap_q->q_depth)
136 break; 136 break;
137 137
138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
139 pci_unmap_page(bnad->pcidev, 139 dma_unmap_page(&bnad->pcidev->dev,
140 pci_unmap_addr(&unmap_array[unmap_cons], 140 dma_unmap_addr(&unmap_array[unmap_cons],
141 dma_addr), 141 dma_addr),
142 skb_shinfo(skb)->frags[i].size, 142 skb_shinfo(skb)->frags[i].size,
143 PCI_DMA_TODEVICE); 143 DMA_TO_DEVICE);
144 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 144 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
145 0); 145 0);
146 if (++unmap_cons >= unmap_q->q_depth) 146 if (++unmap_cons >= unmap_q->q_depth)
147 break; 147 break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
199 sent_bytes += skb->len; 199 sent_bytes += skb->len;
200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); 200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
201 201
202 pci_unmap_single(bnad->pcidev, 202 dma_unmap_single(&bnad->pcidev->dev,
203 pci_unmap_addr(&unmap_array[unmap_cons], 203 dma_unmap_addr(&unmap_array[unmap_cons],
204 dma_addr), skb_headlen(skb), 204 dma_addr), skb_headlen(skb),
205 PCI_DMA_TODEVICE); 205 DMA_TO_DEVICE);
206 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 206 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
208 208
209 prefetch(&unmap_array[unmap_cons + 1]); 209 prefetch(&unmap_array[unmap_cons + 1]);
210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
211 prefetch(&unmap_array[unmap_cons + 1]); 211 prefetch(&unmap_array[unmap_cons + 1]);
212 212
213 pci_unmap_page(bnad->pcidev, 213 dma_unmap_page(&bnad->pcidev->dev,
214 pci_unmap_addr(&unmap_array[unmap_cons], 214 dma_unmap_addr(&unmap_array[unmap_cons],
215 dma_addr), 215 dma_addr),
216 skb_shinfo(skb)->frags[i].size, 216 skb_shinfo(skb)->frags[i].size,
217 PCI_DMA_TODEVICE); 217 DMA_TO_DEVICE);
218 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 218 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
219 0); 219 0);
220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); 220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
221 } 221 }
@@ -340,19 +340,22 @@ static void
340bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 340bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
341{ 341{
342 struct bnad_unmap_q *unmap_q; 342 struct bnad_unmap_q *unmap_q;
343 struct bnad_skb_unmap *unmap_array;
343 struct sk_buff *skb; 344 struct sk_buff *skb;
344 int unmap_cons; 345 int unmap_cons;
345 346
346 unmap_q = rcb->unmap_q; 347 unmap_q = rcb->unmap_q;
348 unmap_array = unmap_q->unmap_array;
347 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { 349 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
348 skb = unmap_q->unmap_array[unmap_cons].skb; 350 skb = unmap_array[unmap_cons].skb;
349 if (!skb) 351 if (!skb)
350 continue; 352 continue;
351 unmap_q->unmap_array[unmap_cons].skb = NULL; 353 unmap_array[unmap_cons].skb = NULL;
352 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> 354 dma_unmap_single(&bnad->pcidev->dev,
353 unmap_array[unmap_cons], 355 dma_unmap_addr(&unmap_array[unmap_cons],
354 dma_addr), rcb->rxq->buffer_size, 356 dma_addr),
355 PCI_DMA_FROMDEVICE); 357 rcb->rxq->buffer_size,
358 DMA_FROM_DEVICE);
356 dev_kfree_skb(skb); 359 dev_kfree_skb(skb);
357 } 360 }
358 bnad_reset_rcb(bnad, rcb); 361 bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
391 skb->dev = bnad->netdev; 394 skb->dev = bnad->netdev;
392 skb_reserve(skb, NET_IP_ALIGN); 395 skb_reserve(skb, NET_IP_ALIGN);
393 unmap_array[unmap_prod].skb = skb; 396 unmap_array[unmap_prod].skb = skb;
394 dma_addr = pci_map_single(bnad->pcidev, skb->data, 397 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
395 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE); 398 rcb->rxq->buffer_size,
396 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, 399 DMA_FROM_DEVICE);
400 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
397 dma_addr); 401 dma_addr);
398 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); 402 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
399 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 403 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
434 struct bna_rcb *rcb = NULL; 438 struct bna_rcb *rcb = NULL;
435 unsigned int wi_range, packets = 0, wis = 0; 439 unsigned int wi_range, packets = 0, wis = 0;
436 struct bnad_unmap_q *unmap_q; 440 struct bnad_unmap_q *unmap_q;
441 struct bnad_skb_unmap *unmap_array;
437 struct sk_buff *skb; 442 struct sk_buff *skb;
438 u32 flags; 443 u32 flags, unmap_cons;
439 u32 qid0 = ccb->rcb[0]->rxq->rxq_id; 444 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
440 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 445 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
441 446
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
456 rcb = ccb->rcb[1]; 461 rcb = ccb->rcb[1];
457 462
458 unmap_q = rcb->unmap_q; 463 unmap_q = rcb->unmap_q;
464 unmap_array = unmap_q->unmap_array;
465 unmap_cons = unmap_q->consumer_index;
459 466
460 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; 467 skb = unmap_array[unmap_cons].skb;
461 BUG_ON(!(skb)); 468 BUG_ON(!(skb));
462 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; 469 unmap_array[unmap_cons].skb = NULL;
463 pci_unmap_single(bnad->pcidev, 470 dma_unmap_single(&bnad->pcidev->dev,
464 pci_unmap_addr(&unmap_q-> 471 dma_unmap_addr(&unmap_array[unmap_cons],
465 unmap_array[unmap_q->
466 consumer_index],
467 dma_addr), 472 dma_addr),
468 rcb->rxq->buffer_size, 473 rcb->rxq->buffer_size,
469 PCI_DMA_FROMDEVICE); 474 DMA_FROM_DEVICE);
470 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); 475 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
471 476
472 /* Should be more efficient ? Performance ? */ 477 /* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
1015 if (mem_info->mem_type == BNA_MEM_T_DMA) { 1020 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1016 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), 1021 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1017 dma_pa); 1022 dma_pa);
1018 pci_free_consistent(bnad->pcidev, 1023 dma_free_coherent(&bnad->pcidev->dev,
1019 mem_info->mdl[i].len, 1024 mem_info->mdl[i].len,
1020 mem_info->mdl[i].kva, dma_pa); 1025 mem_info->mdl[i].kva, dma_pa);
1021 } else 1026 } else
1022 kfree(mem_info->mdl[i].kva); 1027 kfree(mem_info->mdl[i].kva);
1023 } 1028 }
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
1047 for (i = 0; i < mem_info->num; i++) { 1052 for (i = 0; i < mem_info->num; i++) {
1048 mem_info->mdl[i].len = mem_info->len; 1053 mem_info->mdl[i].len = mem_info->len;
1049 mem_info->mdl[i].kva = 1054 mem_info->mdl[i].kva =
1050 pci_alloc_consistent(bnad->pcidev, 1055 dma_alloc_coherent(&bnad->pcidev->dev,
1051 mem_info->len, &dma_pa); 1056 mem_info->len, &dma_pa,
1057 GFP_KERNEL);
1052 1058
1053 if (mem_info->mdl[i].kva == NULL) 1059 if (mem_info->mdl[i].kva == NULL)
1054 goto err_return; 1060 goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2600 unmap_q->unmap_array[unmap_prod].skb = skb; 2606 unmap_q->unmap_array[unmap_prod].skb = skb;
2601 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR)); 2607 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2602 txqent->vector[vect_id].length = htons(skb_headlen(skb)); 2608 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2603 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb), 2609 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2604 PCI_DMA_TODEVICE); 2610 skb_headlen(skb), DMA_TO_DEVICE);
2605 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2611 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2606 dma_addr); 2612 dma_addr);
2607 2613
2608 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2614 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2630 2636
2631 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); 2637 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2632 txqent->vector[vect_id].length = htons(size); 2638 txqent->vector[vect_id].length = htons(size);
2633 dma_addr = 2639 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2634 pci_map_page(bnad->pcidev, frag->page, 2640 frag->page_offset, size, DMA_TO_DEVICE);
2635 frag->page_offset, size, 2641 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2636 PCI_DMA_TODEVICE);
2637 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2638 dma_addr); 2642 dma_addr);
2639 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2643 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2640 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 2644 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
3022 err = pci_request_regions(pdev, BNAD_NAME); 3026 err = pci_request_regions(pdev, BNAD_NAME);
3023 if (err) 3027 if (err)
3024 goto disable_device; 3028 goto disable_device;
3025 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 3029 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3026 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3030 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3027 *using_dac = 1; 3031 *using_dac = 1;
3028 } else { 3032 } else {
3029 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3033 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3030 if (err) { 3034 if (err) {
3031 err = pci_set_consistent_dma_mask(pdev, 3035 err = dma_set_coherent_mask(&pdev->dev,
3032 DMA_BIT_MASK(32)); 3036 DMA_BIT_MASK(32));
3033 if (err) 3037 if (err)
3034 goto release_regions; 3038 goto release_regions;
3035 } 3039 }
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d51557de..a89117fa497 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
181/* Unmap queues for Tx / Rx cleanup */ 181/* Unmap queues for Tx / Rx cleanup */
182struct bnad_skb_unmap { 182struct bnad_skb_unmap {
183 struct sk_buff *skb; 183 struct sk_buff *skb;
184 DECLARE_PCI_UNMAP_ADDR(dma_addr) 184 DEFINE_DMA_UNMAP_ADDR(dma_addr);
185}; 185};
186 186
187struct bnad_unmap_q { 187struct bnad_unmap_q {
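The bnad.c and bnad.h changes above convert the driver from the legacy pci_* DMA wrappers to the generic DMA API: pci_map_single/pci_unmap_single become dma_map_single/dma_unmap_single against &pdev->dev, PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE, pci_unmap_addr* become dma_unmap_addr*, DECLARE_PCI_UNMAP_ADDR becomes DEFINE_DMA_UNMAP_ADDR, pci_alloc_consistent becomes dma_alloc_coherent with an explicit GFP flag, and pci_set_dma_mask/pci_set_consistent_dma_mask become dma_set_mask/dma_set_coherent_mask. A condensed sketch of the substitution, written against a hypothetical unmap entry rather than bnad's own:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct foo_unmap {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* was DECLARE_PCI_UNMAP_ADDR() */
};

static int foo_map_rx(struct pci_dev *pdev, struct foo_unmap *u,
		      struct sk_buff *skb, unsigned int len)
{
	dma_addr_t mapping;

	/* pci_map_single(pdev, ..., PCI_DMA_FROMDEVICE) becomes: */
	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping))
		return -ENOMEM;

	u->skb = skb;
	dma_unmap_addr_set(u, dma_addr, mapping);	/* was pci_unmap_addr_set() */
	return 0;
}

static void foo_unmap_rx(struct pci_dev *pdev, struct foo_unmap *u,
			 unsigned int len)
{
	/* pci_unmap_single(pdev, ..., PCI_DMA_FROMDEVICE) becomes: */
	dma_unmap_single(&pdev->dev, dma_unmap_addr(u, dma_addr), len,
			 DMA_FROM_DEVICE);
	u->skb = NULL;
}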
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0ba59d5aeb7..2a961b7f7e1 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
435 struct cnic_ctl_info info; 435 struct cnic_ctl_info info;
436 436
437 mutex_lock(&bp->cnic_lock); 437 mutex_lock(&bp->cnic_lock);
438 c_ops = bp->cnic_ops; 438 c_ops = rcu_dereference_protected(bp->cnic_ops,
439 lockdep_is_held(&bp->cnic_lock));
439 if (c_ops) { 440 if (c_ops) {
440 info.cmd = CNIC_CTL_STOP_CMD; 441 info.cmd = CNIC_CTL_STOP_CMD;
441 c_ops->cnic_ctl(bp->cnic_data, &info); 442 c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
450 struct cnic_ctl_info info; 451 struct cnic_ctl_info info;
451 452
452 mutex_lock(&bp->cnic_lock); 453 mutex_lock(&bp->cnic_lock);
453 c_ops = bp->cnic_ops; 454 c_ops = rcu_dereference_protected(bp->cnic_ops,
455 lockdep_is_held(&bp->cnic_lock));
454 if (c_ops) { 456 if (c_ops) {
455 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { 457 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
456 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; 458 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -8315,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8315#endif 8317#endif
8316}; 8318};
8317 8319
8318static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 8320static void inline vlan_features_add(struct net_device *dev, u32 flags)
8319{ 8321{
8320 dev->vlan_features |= flags; 8322 dev->vlan_features |= flags;
8321} 8323}
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index f459fb2f9ad..7a5e88f831f 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
6207 6207
6208#define BNX2_CP_SCRATCH 0x001a0000 6208#define BNX2_CP_SCRATCH 0x001a0000
6209 6209
6210#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
6211
6210 6212
6211/* 6213/*
6212 * mcp_reg definition 6214 * mcp_reg definition
@@ -6759,7 +6761,7 @@ struct bnx2 {
6759 u32 tx_wake_thresh; 6761 u32 tx_wake_thresh;
6760 6762
6761#ifdef BCM_CNIC 6763#ifdef BCM_CNIC
6762 struct cnic_ops *cnic_ops; 6764 struct cnic_ops __rcu *cnic_ops;
6763 void *cnic_data; 6765 void *cnic_data;
6764#endif 6766#endif
6765 6767
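The bnx2.c and bnx2.h hunks above annotate cnic_ops as an __rcu pointer and read it with rcu_dereference_protected(), documenting that these readers are serialized by cnic_lock rather than an RCU read-side critical section. A generic sketch of the annotation pattern, with hypothetical structure names:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct foo_ops {
	void (*ctl)(void *data, int cmd);
};

struct foo {
	struct mutex lock;
	struct foo_ops __rcu *ops;	/* updated with rcu_assign_pointer() */
	void *data;
};

static void foo_notify(struct foo *f, int cmd)
{
	struct foo_ops *ops;

	mutex_lock(&f->lock);
	/* Legitimate non-RCU access: updates to f->ops also hold f->lock,
	 * and lockdep_is_held() lets sparse/lockdep verify that claim. */
	ops = rcu_dereference_protected(f->ops, lockdep_is_held(&f->lock));
	if (ops)
		ops->ctl(f->data, cmd);
	mutex_unlock(&f->lock);
}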
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 7897d114b29..50d1e079309 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
22 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
23/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
24 24
25#define DRV_MODULE_VERSION "1.62.00-6" 25#define DRV_MODULE_VERSION "1.62.11-0"
26#define DRV_MODULE_RELDATE "2011/01/30" 26#define DRV_MODULE_RELDATE "2011/01/31"
27#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
28 28
29#define BNX2X_MULTI_QUEUE 29#define BNX2X_MULTI_QUEUE
@@ -31,7 +31,7 @@
31#define BNX2X_NEW_NAPI 31#define BNX2X_NEW_NAPI
32 32
33#if defined(CONFIG_DCB) 33#if defined(CONFIG_DCB)
34#define BCM_DCB 34#define BCM_DCBNL
35#endif 35#endif
36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
37#define BCM_CNIC 1 37#define BCM_CNIC 1
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
129#endif 129#endif
130 130
131#define bnx2x_mc_addr(ha) ((ha)->addr) 131#define bnx2x_mc_addr(ha) ((ha)->addr)
132#define bnx2x_uc_addr(ha) ((ha)->addr)
132 133
133#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) 134#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
134#define U64_HI(x) (u32)(((u64)(x)) >> 32) 135#define U64_HI(x) (u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
341 /* chip independed shortcut into rx_prods_offset memory */ 342 /* chip independed shortcut into rx_prods_offset memory */
342 u32 ustorm_rx_prods_offset; 343 u32 ustorm_rx_prods_offset;
343 344
345 u32 rx_buf_size;
346
344 dma_addr_t status_blk_mapping; 347 dma_addr_t status_blk_mapping;
345 348
346 struct sw_tx_bd *tx_buf_ring; 349 struct sw_tx_bd *tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
428}; 431};
429 432
430#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 433#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
434
435/* Use 2500 as a mini-jumbo MTU for FCoE */
436#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
437
431#ifdef BCM_CNIC 438#ifdef BCM_CNIC
432/* FCoE L2 `fastpath' is right after the eth entries */ 439/* FCoE L2 `fastpath' is right after the eth entries */
433#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) 440#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
810 struct eth_stats_query fw_stats; 817 struct eth_stats_query fw_stats;
811 struct mac_configuration_cmd mac_config; 818 struct mac_configuration_cmd mac_config;
812 struct mac_configuration_cmd mcast_config; 819 struct mac_configuration_cmd mcast_config;
820 struct mac_configuration_cmd uc_mac_config;
813 struct client_init_ramrod_data client_init_data; 821 struct client_init_ramrod_data client_init_data;
814 822
815 /* used by dmae command executer */ 823 /* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
911 int tx_ring_size; 919 int tx_ring_size;
912 920
913 u32 rx_csum; 921 u32 rx_csum;
914 u32 rx_buf_size;
915/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ 922/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
916#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) 923#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
917#define ETH_MIN_PACKET_SIZE 60 924#define ETH_MIN_PACKET_SIZE 60
@@ -939,7 +946,7 @@ struct bnx2x {
939 struct eth_spe *spq_prod_bd; 946 struct eth_spe *spq_prod_bd;
940 struct eth_spe *spq_last_bd; 947 struct eth_spe *spq_last_bd;
941 __le16 *dsb_sp_prod; 948 __le16 *dsb_sp_prod;
942 atomic_t spq_left; /* serialize spq */ 949 atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
943 /* used to synchronize spq accesses */ 950 /* used to synchronize spq accesses */
944 spinlock_t spq_lock; 951 spinlock_t spq_lock;
945 952
@@ -949,6 +956,7 @@ struct bnx2x {
949 u16 eq_prod; 956 u16 eq_prod;
950 u16 eq_cons; 957 u16 eq_cons;
951 __le16 *eq_cons_sb; 958 __le16 *eq_cons_sb;
959 atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
952 960
953 /* Flags for marking that there is a STAT_QUERY or 961 /* Flags for marking that there is a STAT_QUERY or
954 SET_MAC ramrod pending */ 962 SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
976#define MF_FUNC_DIS 0x1000 984#define MF_FUNC_DIS 0x1000
977#define FCOE_MACS_SET 0x2000 985#define FCOE_MACS_SET 0x2000
978#define NO_FCOE_FLAG 0x4000 986#define NO_FCOE_FLAG 0x4000
987#define NO_ISCSI_OOO_FLAG 0x8000
988#define NO_ISCSI_FLAG 0x10000
979 989
980#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) 990#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
991#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
992#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
981 993
982 int pf_num; /* absolute PF number */ 994 int pf_num; /* absolute PF number */
983 int pfid; /* per-path PF number */ 995 int pfid; /* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
1064 int num_queues; 1076 int num_queues;
1065 int disable_tpa; 1077 int disable_tpa;
1066 int int_mode; 1078 int int_mode;
1079 u32 *rx_indir_table;
1067 1080
1068 struct tstorm_eth_mac_filter_config mac_filters; 1081 struct tstorm_eth_mac_filter_config mac_filters;
1069#define BNX2X_ACCEPT_NONE 0x0000 1082#define BNX2X_ACCEPT_NONE 0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
1110#define BNX2X_CNIC_FLAG_MAC_SET 1 1123#define BNX2X_CNIC_FLAG_MAC_SET 1
1111 void *t2; 1124 void *t2;
1112 dma_addr_t t2_mapping; 1125 dma_addr_t t2_mapping;
1113 struct cnic_ops *cnic_ops; 1126 struct cnic_ops __rcu *cnic_ops;
1114 void *cnic_data; 1127 void *cnic_data;
1115 u32 cnic_tag; 1128 u32 cnic_tag;
1116 struct cnic_eth_dev cnic_eth_dev; 1129 struct cnic_eth_dev cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
1125 u16 cnic_kwq_pending; 1138 u16 cnic_kwq_pending;
1126 u16 cnic_spq_pending; 1139 u16 cnic_spq_pending;
1127 struct mutex cnic_mutex; 1140 struct mutex cnic_mutex;
1128 u8 iscsi_mac[ETH_ALEN];
1129 u8 fip_mac[ETH_ALEN]; 1141 u8 fip_mac[ETH_ALEN];
1130#endif 1142#endif
1131 1143
1132 int dmae_ready; 1144 int dmae_ready;
1133 /* used to synchronize dmae accesses */ 1145 /* used to synchronize dmae accesses */
1134 struct mutex dmae_mutex; 1146 spinlock_t dmae_lock;
1135 1147
1136 /* used to protect the FW mail box */ 1148 /* used to protect the FW mail box */
1137 struct mutex fw_mb_mutex; 1149 struct mutex fw_mb_mutex;
@@ -1447,6 +1459,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
1447void bnx2x_calc_fc_adv(struct bnx2x *bp); 1459void bnx2x_calc_fc_adv(struct bnx2x *bp);
1448int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 1460int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1449 u32 data_hi, u32 data_lo, int common); 1461 u32 data_hi, u32 data_lo, int common);
1462
1463/* Clears multicast and unicast list configuration in the chip. */
1464void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
1465void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
1466void bnx2x_invalidate_uc_list(struct bnx2x *bp);
1467
1450void bnx2x_update_coalesce(struct bnx2x *bp); 1468void bnx2x_update_coalesce(struct bnx2x *bp);
1451int bnx2x_get_link_cfg_idx(struct bnx2x *bp); 1469int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
1452 1470
@@ -1786,5 +1804,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1786BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */ 1804BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
1787 1805
1788extern void bnx2x_set_ethtool_ops(struct net_device *netdev); 1806extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1807void bnx2x_push_indir_table(struct bnx2x *bp);
1789 1808
1790#endif /* bnx2x.h */ 1809#endif /* bnx2x.h */
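Among other things, the bnx2x.h hunks above split the single spq_left counter into cq_spq_left (ETH ramrod credits) and eq_spq_left (COMMON ramrod credits). Both are plain atomic credit counters, consumed when a slow-path entry is posted and returned on completion; an illustrative sketch of that accounting (not the driver's actual helpers):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_spq {
	spinlock_t lock;		/* serializes slow-path posts */
	atomic_t cq_spq_left;		/* credits for ETH ramrods */
	atomic_t eq_spq_left;		/* credits for COMMON ramrods */
};

/* Post one ramrod of the given class, consuming a credit. The matching
 * completion handler returns the credit with atomic_inc(). */
static int foo_spq_post(struct foo_spq *spq, bool common)
{
	atomic_t *left = common ? &spq->eq_spq_left : &spq->cq_spq_left;

	spin_lock_bh(&spq->lock);
	if (!atomic_read(left)) {
		spin_unlock_bh(&spq->lock);
		return -EBUSY;
	}
	atomic_dec(left);
	/* ... build and ring the slow-path queue entry here ... */
	spin_unlock_bh(&spq->lock);
	return 0;
}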
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 93798129061..b01b622f4e1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
232 /* move empty skb from pool to prod and map it */ 232 /* move empty skb from pool to prod and map it */
233 prod_rx_buf->skb = fp->tpa_pool[queue].skb; 233 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, 234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
235 bp->rx_buf_size, DMA_FROM_DEVICE); 235 fp->rx_buf_size, DMA_FROM_DEVICE);
236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping); 236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237 237
238 /* move partial skb from cons to pool (don't unmap yet) */ 238 /* move partial skb from cons to pool (don't unmap yet) */
@@ -367,13 +367,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
367 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; 367 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
368 struct sk_buff *skb = rx_buf->skb; 368 struct sk_buff *skb = rx_buf->skb;
369 /* alloc new skb */ 369 /* alloc new skb */
370 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 370 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
371 371
372 /* Unmap skb in the pool anyway, as we are going to change 372 /* Unmap skb in the pool anyway, as we are going to change
373 pool entry status to BNX2X_TPA_STOP even if new skb allocation 373 pool entry status to BNX2X_TPA_STOP even if new skb allocation
374 fails. */ 374 fails. */
375 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), 375 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
376 bp->rx_buf_size, DMA_FROM_DEVICE); 376 fp->rx_buf_size, DMA_FROM_DEVICE);
377 377
378 if (likely(new_skb)) { 378 if (likely(new_skb)) {
379 /* fix ip xsum and give it to the stack */ 379 /* fix ip xsum and give it to the stack */
@@ -385,10 +385,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
385 prefetch(((char *)(skb)) + L1_CACHE_BYTES); 385 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
386 386
387#ifdef BNX2X_STOP_ON_ERROR 387#ifdef BNX2X_STOP_ON_ERROR
388 if (pad + len > bp->rx_buf_size) { 388 if (pad + len > fp->rx_buf_size) {
389 BNX2X_ERR("skb_put is about to fail... " 389 BNX2X_ERR("skb_put is about to fail... "
390 "pad %d len %d rx_buf_size %d\n", 390 "pad %d len %d rx_buf_size %d\n",
391 pad, len, bp->rx_buf_size); 391 pad, len, fp->rx_buf_size);
392 bnx2x_panic(); 392 bnx2x_panic();
393 return; 393 return;
394 } 394 }
@@ -618,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
618 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { 618 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
619 dma_unmap_single(&bp->pdev->dev, 619 dma_unmap_single(&bp->pdev->dev,
620 dma_unmap_addr(rx_buf, mapping), 620 dma_unmap_addr(rx_buf, mapping),
621 bp->rx_buf_size, 621 fp->rx_buf_size,
622 DMA_FROM_DEVICE); 622 DMA_FROM_DEVICE);
623 skb_reserve(skb, pad); 623 skb_reserve(skb, pad);
624 skb_put(skb, len); 624 skb_put(skb, len);
@@ -858,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
858 u16 ring_prod; 858 u16 ring_prod;
859 int i, j; 859 int i, j;
860 860
861 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
862 IP_HEADER_ALIGNMENT_PADDING;
863
864 DP(NETIF_MSG_IFUP,
865 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
866
867 for_each_rx_queue(bp, j) { 861 for_each_rx_queue(bp, j) {
868 struct bnx2x_fastpath *fp = &bp->fp[j]; 862 struct bnx2x_fastpath *fp = &bp->fp[j];
869 863
864 DP(NETIF_MSG_IFUP,
865 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
866
870 if (!fp->disable_tpa) { 867 if (!fp->disable_tpa) {
871 for (i = 0; i < max_agg_queues; i++) { 868 for (i = 0; i < max_agg_queues; i++) {
872 fp->tpa_pool[i].skb = 869 fp->tpa_pool[i].skb =
873 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 870 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
874 if (!fp->tpa_pool[i].skb) { 871 if (!fp->tpa_pool[i].skb) {
875 BNX2X_ERR("Failed to allocate TPA " 872 BNX2X_ERR("Failed to allocate TPA "
876 "skb pool for queue[%d] - " 873 "skb pool for queue[%d] - "
@@ -978,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
978 975
979 dma_unmap_single(&bp->pdev->dev, 976 dma_unmap_single(&bp->pdev->dev,
980 dma_unmap_addr(rx_buf, mapping), 977 dma_unmap_addr(rx_buf, mapping),
981 bp->rx_buf_size, DMA_FROM_DEVICE); 978 fp->rx_buf_size, DMA_FROM_DEVICE);
982 979
983 rx_buf->skb = NULL; 980 rx_buf->skb = NULL;
984 dev_kfree_skb(skb); 981 dev_kfree_skb(skb);
@@ -1286,6 +1283,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1286 return rc; 1283 return rc;
1287} 1284}
1288 1285
1286static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1287{
1288 int i;
1289
1290 for_each_queue(bp, i) {
1291 struct bnx2x_fastpath *fp = &bp->fp[i];
1292
1293 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1294 if (IS_FCOE_IDX(i))
1295 /*
1296 * Although there are no IP frames expected to arrive to
1297 * this ring we still want to add an
1298 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1299 * overrun attack.
1300 */
1301 fp->rx_buf_size =
1302 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1303 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1304 else
1305 fp->rx_buf_size =
1306 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1307 IP_HEADER_ALIGNMENT_PADDING;
1308 }
1309}
1310
1289/* must be called with rtnl_lock */ 1311/* must be called with rtnl_lock */
1290int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1312int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1291{ 1313{
@@ -1309,6 +1331,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1309 /* must be called before memory allocation and HW init */ 1331 /* must be called before memory allocation and HW init */
1310 bnx2x_ilt_set_info(bp); 1332 bnx2x_ilt_set_info(bp);
1311 1333
1334 /* Set the receive queues buffer size */
1335 bnx2x_set_rx_buf_size(bp);
1336
1312 if (bnx2x_alloc_mem(bp)) 1337 if (bnx2x_alloc_mem(bp))
1313 return -ENOMEM; 1338 return -ENOMEM;
1314 1339
@@ -1464,28 +1489,35 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1464 1489
1465 bnx2x_set_eth_mac(bp, 1); 1490 bnx2x_set_eth_mac(bp, 1);
1466 1491
1492 /* Clear MC configuration */
1493 if (CHIP_IS_E1(bp))
1494 bnx2x_invalidate_e1_mc_list(bp);
1495 else
1496 bnx2x_invalidate_e1h_mc_list(bp);
1497
1498 /* Clear UC lists configuration */
1499 bnx2x_invalidate_uc_list(bp);
1500
1467 if (bp->port.pmf) 1501 if (bp->port.pmf)
1468 bnx2x_initial_phy_init(bp, load_mode); 1502 bnx2x_initial_phy_init(bp, load_mode);
1469 1503
1504 /* Initialize Rx filtering */
1505 bnx2x_set_rx_mode(bp->dev);
1506
1470 /* Start fast path */ 1507 /* Start fast path */
1471 switch (load_mode) { 1508 switch (load_mode) {
1472 case LOAD_NORMAL: 1509 case LOAD_NORMAL:
1473 /* Tx queue should be only reenabled */ 1510 /* Tx queue should be only reenabled */
1474 netif_tx_wake_all_queues(bp->dev); 1511 netif_tx_wake_all_queues(bp->dev);
1475 /* Initialize the receive filter. */ 1512 /* Initialize the receive filter. */
1476 bnx2x_set_rx_mode(bp->dev);
1477 break; 1513 break;
1478 1514
1479 case LOAD_OPEN: 1515 case LOAD_OPEN:
1480 netif_tx_start_all_queues(bp->dev); 1516 netif_tx_start_all_queues(bp->dev);
1481 smp_mb__after_clear_bit(); 1517 smp_mb__after_clear_bit();
1482 /* Initialize the receive filter. */
1483 bnx2x_set_rx_mode(bp->dev);
1484 break; 1518 break;
1485 1519
1486 case LOAD_DIAG: 1520 case LOAD_DIAG:
1487 /* Initialize the receive filter. */
1488 bnx2x_set_rx_mode(bp->dev);
1489 bp->state = BNX2X_STATE_DIAG; 1521 bp->state = BNX2X_STATE_DIAG;
1490 break; 1522 break;
1491 1523
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 326ba44b3de..8c401c990bf 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -822,11 +822,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
822 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; 822 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
823 dma_addr_t mapping; 823 dma_addr_t mapping;
824 824
825 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 825 skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
826 if (unlikely(skb == NULL)) 826 if (unlikely(skb == NULL))
827 return -ENOMEM; 827 return -ENOMEM;
828 828
829 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, 829 mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
830 DMA_FROM_DEVICE); 830 DMA_FROM_DEVICE);
831 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 831 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
832 dev_kfree_skb(skb); 832 dev_kfree_skb(skb);
@@ -892,7 +892,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
892 if (fp->tpa_state[i] == BNX2X_TPA_START) 892 if (fp->tpa_state[i] == BNX2X_TPA_START)
893 dma_unmap_single(&bp->pdev->dev, 893 dma_unmap_single(&bp->pdev->dev,
894 dma_unmap_addr(rx_buf, mapping), 894 dma_unmap_addr(rx_buf, mapping),
895 bp->rx_buf_size, DMA_FROM_DEVICE); 895 fp->rx_buf_size, DMA_FROM_DEVICE);
896 896
897 dev_kfree_skb(skb); 897 dev_kfree_skb(skb);
898 rx_buf->skb = NULL; 898 rx_buf->skb = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index fb60021f81f..9a24d79c71d 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -19,6 +19,9 @@
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/errno.h> 21#include <linux/errno.h>
22#ifdef BCM_DCBNL
23#include <linux/dcbnl.h>
24#endif
22 25
23#include "bnx2x.h" 26#include "bnx2x.h"
24#include "bnx2x_cmn.h" 27#include "bnx2x_cmn.h"
@@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
508 return 0; 511 return 0;
509} 512}
510 513
514
515#ifdef BCM_DCBNL
516static inline
517u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
518{
519 u8 pri;
520
521 /* Choose the highest priority */
522 for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
523 if (ent->pri_bitmap & (1 << pri))
524 break;
525 return pri;
526}
527
528static inline
529u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
530{
531 return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
532 DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
533 DCB_APP_IDTYPE_ETHTYPE;
534}
535
536static inline
537void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
538{
539 int i;
540 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
541 bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
542 ~DCBX_APP_ENTRY_VALID;
543}
544
545int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
546{
547 int i, err = 0;
548
549 for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
550 struct dcbx_app_priority_entry *ent =
551 &bp->dcbx_local_feat.app.app_pri_tbl[i];
552
553 if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
554 u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
555
556 /* avoid invalid user-priority */
557 if (up) {
558 struct dcb_app app;
559 app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
560 app.protocol = ent->app_id;
561 app.priority = delall ? 0 : up;
562 err = dcb_setapp(bp->dev, &app);
563 }
564 }
565 }
566 return err;
567}
568#endif
569
511void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) 570void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
512{ 571{
513 switch (state) { 572 switch (state) {
514 case BNX2X_DCBX_STATE_NEG_RECEIVED: 573 case BNX2X_DCBX_STATE_NEG_RECEIVED:
515 { 574 {
516 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); 575 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
517 576#ifdef BCM_DCBNL
577 /**
578 * Delete app tlvs from dcbnl before reading new
579 * negotiation results
580 */
581 bnx2x_dcbnl_update_applist(bp, true);
582#endif
518 /* Read neg results if dcbx is in the FW */ 583 /* Read neg results if dcbx is in the FW */
519 if (bnx2x_dcbx_read_shmem_neg_results(bp)) 584 if (bnx2x_dcbx_read_shmem_neg_results(bp))
520 return; 585 return;
@@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
526 bp->dcbx_error); 591 bp->dcbx_error);
527 592
528 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { 593 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
594#ifdef BCM_DCBNL
595 /**
596 * Add new app tlvs to dcbnl
597 */
598 bnx2x_dcbnl_update_applist(bp, false);
599#endif
529 bnx2x_dcbx_stop_hw_tx(bp); 600 bnx2x_dcbx_stop_hw_tx(bp);
530 return; 601 return;
531 } 602 }
532 /* fall through */ 603 /* fall through */
604#ifdef BCM_DCBNL
605 /**
606 * Invalidate the local app tlvs if they are not added
607 * to the dcbnl app list to avoid deleting them from
608 * the list later on
609 */
610 bnx2x_dcbx_invalidate_local_apps(bp);
611#endif
533 } 612 }
534 case BNX2X_DCBX_STATE_TX_PAUSED: 613 case BNX2X_DCBX_STATE_TX_PAUSED:
535 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); 614 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
@@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
1505 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); 1584 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
1506} 1585}
1507/* DCB netlink */ 1586/* DCB netlink */
1508#ifdef BCM_DCB 1587#ifdef BCM_DCBNL
1509#include <linux/dcbnl.h>
1510 1588
1511#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ 1589#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
1512 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) 1590 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
@@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
1816 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); 1894 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
1817} 1895}
1818 1896
1819static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
1820 u8 idtype, u16 idval)
1821{
1822 if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
1823 return false;
1824
1825 switch (idtype) {
1826 case DCB_APP_IDTYPE_ETHTYPE:
1827 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1828 DCBX_APP_SF_ETH_TYPE)
1829 return false;
1830 break;
1831 case DCB_APP_IDTYPE_PORTNUM:
1832 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1833 DCBX_APP_SF_PORT)
1834 return false;
1835 break;
1836 default:
1837 return false;
1838 }
1839 if (app_ent->app_id != idval)
1840 return false;
1841
1842 return true;
1843}
1844
1845static void bnx2x_admin_app_set_ent( 1897static void bnx2x_admin_app_set_ent(
1846 struct bnx2x_admin_priority_app_table *app_ent, 1898 struct bnx2x_admin_priority_app_table *app_ent,
1847 u8 idtype, u16 idval, u8 up) 1899 u8 idtype, u16 idval, u8 up)
@@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
1943 return bnx2x_set_admin_app_up(bp, idtype, idval, up); 1995 return bnx2x_set_admin_app_up(bp, idtype, idval, up);
1944} 1996}
1945 1997
1946static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
1947 u16 idval)
1948{
1949 int i;
1950 u8 up = 0;
1951
1952 struct bnx2x *bp = netdev_priv(netdev);
1953 DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
1954
1955 /* iterate over the app entries looking for idtype and idval */
1956 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
1957 if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
1958 idtype, idval))
1959 break;
1960
1961 if (i < DCBX_MAX_APP_PROTOCOL)
1962 /* if found return up */
1963 up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
1964 else
1965 DP(NETIF_MSG_LINK, "app not found\n");
1966
1967 return up;
1968}
1969
1970static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) 1998static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
1971{ 1999{
1972 struct bnx2x *bp = netdev_priv(netdev); 2000 struct bnx2x *bp = netdev_priv(netdev);
@@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2107 .setnumtcs = bnx2x_dcbnl_set_numtcs, 2135 .setnumtcs = bnx2x_dcbnl_set_numtcs,
2108 .getpfcstate = bnx2x_dcbnl_get_pfc_state, 2136 .getpfcstate = bnx2x_dcbnl_get_pfc_state,
2109 .setpfcstate = bnx2x_dcbnl_set_pfc_state, 2137 .setpfcstate = bnx2x_dcbnl_set_pfc_state,
2110 .getapp = bnx2x_dcbnl_get_app_up,
2111 .setapp = bnx2x_dcbnl_set_app_up, 2138 .setapp = bnx2x_dcbnl_set_app_up,
2112 .getdcbx = bnx2x_dcbnl_get_dcbx, 2139 .getdcbx = bnx2x_dcbnl_get_dcbx,
2113 .setdcbx = bnx2x_dcbnl_set_dcbx, 2140 .setdcbx = bnx2x_dcbnl_set_dcbx,
@@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2115 .setfeatcfg = bnx2x_dcbnl_set_featcfg, 2142 .setfeatcfg = bnx2x_dcbnl_set_featcfg,
2116}; 2143};
2117 2144
2118#endif /* BCM_DCB */ 2145#endif /* BCM_DCBNL */
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index f650f98e409..71b8eda43bd 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -189,8 +189,9 @@ enum {
189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); 189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
190 190
191/* DCB netlink */ 191/* DCB netlink */
192#ifdef BCM_DCB 192#ifdef BCM_DCBNL
193extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; 193extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
194#endif /* BCM_DCB */ 194int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
195#endif /* BCM_DCBNL */
195 196
196#endif /* BNX2X_DCB_H */ 197#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index ef2919987a1..85291d8b332 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1619,7 +1619,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1619 /* prepare the loopback packet */ 1619 /* prepare the loopback packet */
1620 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? 1620 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
1621 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); 1621 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
1622 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); 1622 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
1623 if (!skb) { 1623 if (!skb) {
1624 rc = -ENOMEM; 1624 rc = -ENOMEM;
1625 goto test_loopback_exit; 1625 goto test_loopback_exit;
@@ -2133,6 +2133,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
2133 return 0; 2133 return 0;
2134} 2134}
2135 2135
2136static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2137 void *rules __always_unused)
2138{
2139 struct bnx2x *bp = netdev_priv(dev);
2140
2141 switch (info->cmd) {
2142 case ETHTOOL_GRXRINGS:
2143 info->data = BNX2X_NUM_ETH_QUEUES(bp);
2144 return 0;
2145
2146 default:
2147 return -EOPNOTSUPP;
2148 }
2149}
2150
2151static int bnx2x_get_rxfh_indir(struct net_device *dev,
2152 struct ethtool_rxfh_indir *indir)
2153{
2154 struct bnx2x *bp = netdev_priv(dev);
2155 size_t copy_size =
2156 min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
2157
2158 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2159 return -EOPNOTSUPP;
2160
2161 indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
2162 memcpy(indir->ring_index, bp->rx_indir_table,
2163 copy_size * sizeof(bp->rx_indir_table[0]));
2164 return 0;
2165}
2166
2167static int bnx2x_set_rxfh_indir(struct net_device *dev,
2168 const struct ethtool_rxfh_indir *indir)
2169{
2170 struct bnx2x *bp = netdev_priv(dev);
2171 size_t i;
2172
2173 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
2174 return -EOPNOTSUPP;
2175
2176 /* Validate size and indices */
2177 if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
2178 return -EINVAL;
2179 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
2180 if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
2181 return -EINVAL;
2182
2183 memcpy(bp->rx_indir_table, indir->ring_index,
2184 indir->size * sizeof(bp->rx_indir_table[0]));
2185 bnx2x_push_indir_table(bp);
2186 return 0;
2187}
2188
2136static const struct ethtool_ops bnx2x_ethtool_ops = { 2189static const struct ethtool_ops bnx2x_ethtool_ops = {
2137 .get_settings = bnx2x_get_settings, 2190 .get_settings = bnx2x_get_settings,
2138 .set_settings = bnx2x_set_settings, 2191 .set_settings = bnx2x_set_settings,
@@ -2169,6 +2222,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
2169 .get_strings = bnx2x_get_strings, 2222 .get_strings = bnx2x_get_strings,
2170 .phys_id = bnx2x_phys_id, 2223 .phys_id = bnx2x_phys_id,
2171 .get_ethtool_stats = bnx2x_get_ethtool_stats, 2224 .get_ethtool_stats = bnx2x_get_ethtool_stats,
2225 .get_rxnfc = bnx2x_get_rxnfc,
2226 .get_rxfh_indir = bnx2x_get_rxfh_indir,
2227 .set_rxfh_indir = bnx2x_set_rxfh_indir,
2172}; 2228};
2173 2229
2174void bnx2x_set_ethtool_ops(struct net_device *netdev) 2230void bnx2x_set_ethtool_ops(struct net_device *netdev)
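The new get_rxnfc, get_rxfh_indir and set_rxfh_indir hooks above let ethtool read and program the RSS indirection table that the driver keeps in bp->rx_indir_table and pushes to hardware via bnx2x_push_indir_table(). Conceptually, the table maps the low bits of a packet's RSS hash to an RX queue index; a standalone illustration with placeholder sizes (128 stands in for TSTORM_INDIRECTION_TABLE_SIZE):

#include <stdio.h>

#define INDIR_TABLE_SIZE 128	/* placeholder for TSTORM_INDIRECTION_TABLE_SIZE */
#define NUM_ETH_QUEUES 4	/* placeholder queue count */

/* Every flow hashes to one table slot; the slot holds the RX queue index. */
static unsigned int pick_rx_queue(const unsigned int *indir, unsigned int rss_hash)
{
	return indir[rss_hash % INDIR_TABLE_SIZE];
}

int main(void)
{
	unsigned int indir[INDIR_TABLE_SIZE];
	unsigned int i;

	/* Spread flows evenly across the ethernet RX queues. */
	for (i = 0; i < INDIR_TABLE_SIZE; i++)
		indir[i] = i % NUM_ETH_QUEUES;

	/* hash 0x1234 -> slot 52 -> queue 0 */
	printf("hash 0x1234 -> queue %u\n", pick_rx_queue(indir, 0x1234));
	return 0;
}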
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 548f5631c0d..be503cc0a50 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@
11 11
12#include "bnx2x_fw_defs.h" 12#include "bnx2x_fw_defs.h"
13 13
14#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
15
14struct license_key { 16struct license_key {
15 u32 reserved[6]; 17 u32 reserved[6];
16 18
17#if defined(__BIG_ENDIAN) 19 u32 max_iscsi_conn;
18 u16 max_iscsi_init_conn; 20#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
19 u16 max_iscsi_trgt_conn; 21#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
20#elif defined(__LITTLE_ENDIAN) 22#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
21 u16 max_iscsi_trgt_conn; 23#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
22 u16 max_iscsi_init_conn;
23#endif
24 24
25 u32 reserved_a[6]; 25 u32 reserved_a;
26}; 26
27 u32 max_fcoe_conn;
28#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
29#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
30#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
31#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
27 32
33 u32 reserved_b[4];
34};
28 35
29#define PORT_0 0 36#define PORT_0 0
30#define PORT_1 1 37#define PORT_1 1
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
237#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 244#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
238 245
239 246
240 u32 Reserved0[16]; /* 0x158 */ 247 u32 Reserved0[3]; /* 0x158 */
241 248 /* Controls the TX laser of the SFP+ module */
249 u32 sfp_ctrl; /* 0x164 */
250#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
251#define PORT_HW_CFG_TX_LASER_SHIFT 0
252#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
253#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
254#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
255#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
256#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
257
258 /* Controls the fault module LED of the SFP+ */
259#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
260#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
261#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
262#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
263#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
264#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
265#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
266 u32 Reserved01[12]; /* 0x158 */
242 /* for external PHY, or forced mode or during AN */ 267 /* for external PHY, or forced mode or during AN */
243 u16 xgxs_config_rx[4]; /* 0x198 */ 268 u16 xgxs_config_rx[4]; /* 0x198 */
244 269
@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
246 271
247 u32 Reserved1[56]; /* 0x1A8 */ 272 u32 Reserved1[56]; /* 0x1A8 */
248 u32 default_cfg; /* 0x288 */ 273 u32 default_cfg; /* 0x288 */
274#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
275#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
276#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
277#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
278#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
279#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
280
281#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
282#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
283#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
284#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
285#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
286#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
287
288#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
289#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
290#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
291#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
292#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
293#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
294
295#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
296#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
297#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
298#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
299#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
300#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
301
302 /*
303 * When KR link is required to be set to force which is not
304 * KR-compliant, this parameter determine what is the trigger for it.
305 * When GPIO is selected, low input will force the speed. Currently
306 * default speed is 1G. In the future, it may be widen to select the
307 * forced speed in with another parameter. Note when force-1G is
308 * enabled, it override option 56: Link Speed option.
309 */
310#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
311#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
312#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
313#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
314#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
315#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
316#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
317#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
318#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
319#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
320#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
321#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
322 /* Enable to determine with which GPIO to reset the external phy */
323#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
324#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
325#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
326#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
327#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
328#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
329#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
330#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
331#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
332#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
333#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
249 /* Enable BAM on KR */ 334 /* Enable BAM on KR */
250#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 335#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
251#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 336#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
252#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 337#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
253#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 338#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
254 339
340 /* Enable Common Mode Sense */
341#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
342#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
343#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
344#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
345
255 u32 speed_capability_mask2; /* 0x28C */ 346 u32 speed_capability_mask2; /* 0x28C */
256#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF 347#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
257#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 348#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -381,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
381#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 472#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
382#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 473#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
383#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 474#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
475#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
384#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 476#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
385#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 477#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
386 478
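The reworked license_key above drops the endian-dependent pair of u16 fields in favour of a single max_iscsi_conn word plus mask/shift definitions (and adds an analogous max_fcoe_conn word). Decoding is then a plain mask-and-shift; a small self-contained sketch using the new macros, with an invented register value:

#include <stdio.h>

#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16

int main(void)
{
	unsigned int max_iscsi_conn = 0x00400080;	/* made-up example value */
	unsigned int trgt = (max_iscsi_conn & BNX2X_MAX_ISCSI_TRGT_CONN_MASK) >>
			    BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;
	unsigned int init = (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
			    BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	/* Prints: target connections: 128, initiator connections: 64 */
	printf("target connections: %u, initiator connections: %u\n", trgt, init);
	return 0;
}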
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index dd1210fddff..f2f367d4e74 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
1/* Copyright 2008-2009 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
28 28
29/********************************************************/ 29/********************************************************/
30#define ETH_HLEN 14 30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */ 31/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
32#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
32#define ETH_MIN_PACKET_SIZE 60 33#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500 34#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600 35#define ETH_MAX_JUMBO_PACKET_SIZE 9600
35#define MDIO_ACCESS_TIMEOUT 1000 36#define MDIO_ACCESS_TIMEOUT 1000
36#define BMAC_CONTROL_RX_ENABLE 2 37#define BMAC_CONTROL_RX_ENABLE 2
37 38
38/***********************************************************/ 39/***********************************************************/
39/* Shortcut definitions */ 40/* Shortcut definitions */
@@ -79,7 +80,7 @@
79 80
80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 81#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 82#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM 83#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
83#define AUTONEG_PARALLEL \ 84#define AUTONEG_PARALLEL \
84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 85 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
85#define AUTONEG_SGMII_FIBER_AUTODET \ 86#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
112#define GP_STATUS_10G_KX4 \ 113#define GP_STATUS_10G_KX4 \
113 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 114 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
114 115
115#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD 116#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
116#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD 117#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
117#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD 118#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
118#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 119#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
119#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD 120#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
120#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD 121#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
121#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD 122#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
123#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD 124#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
124#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD 125#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
125#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD 126#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
126#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD 127#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
127#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD 128#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
128#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD 129#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
129#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD 130#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
130#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD 131#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
131#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD 132#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
132#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD 133#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
133#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD 134#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
134#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD 135#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
135#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD 136#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
136#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD 137#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
137#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD 138#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
138 139
139#define PHY_XGXS_FLAG 0x1 140#define PHY_XGXS_FLAG 0x1
140#define PHY_SGMII_FLAG 0x2 141#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
142 143
143/* */ 144/* */
144#define SFP_EEPROM_CON_TYPE_ADDR 0x2 145#define SFP_EEPROM_CON_TYPE_ADDR 0x2
145 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 146 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
146 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 147 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
147 148
148 149
@@ -153,15 +154,15 @@
153 154
154#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 155#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
155 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
156 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 157 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
157 158
158#define SFP_EEPROM_OPTIONS_ADDR 0x40 159#define SFP_EEPROM_OPTIONS_ADDR 0x40
159 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 160 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
160#define SFP_EEPROM_OPTIONS_SIZE 2 161#define SFP_EEPROM_OPTIONS_SIZE 2
161 162
162#define EDC_MODE_LINEAR 0x0022 163#define EDC_MODE_LINEAR 0x0022
163#define EDC_MODE_LIMITING 0x0044 164#define EDC_MODE_LIMITING 0x0044
164#define EDC_MODE_PASSIVE_DAC 0x0055 165#define EDC_MODE_PASSIVE_DAC 0x0055
165 166
166 167
167#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) 168#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -170,24 +171,18 @@
170/* INTERFACE */ 171/* INTERFACE */
171/**********************************************************/ 172/**********************************************************/
172 173
173#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ 174#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
174 bnx2x_cl45_write(_bp, _phy, \ 175 bnx2x_cl45_write(_bp, _phy, \
175 (_phy)->def_md_devad, \ 176 (_phy)->def_md_devad, \
176 (_bank + (_addr & 0xf)), \ 177 (_bank + (_addr & 0xf)), \
177 _val) 178 _val)
178 179
179#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ 180#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
180 bnx2x_cl45_read(_bp, _phy, \ 181 bnx2x_cl45_read(_bp, _phy, \
181 (_phy)->def_md_devad, \ 182 (_phy)->def_md_devad, \
182 (_bank + (_addr & 0xf)), \ 183 (_bank + (_addr & 0xf)), \
183 _val) 184 _val)
184 185
185static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
186 u8 devad, u16 reg, u16 *ret_val);
187
188static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
189 u8 devad, u16 reg, u16 val);
190
191static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) 186static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
192{ 187{
193 u32 val = REG_RD(bp, reg); 188 u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
216 211
217 DP(NETIF_MSG_LINK, "ETS disabled configuration\n"); 212 DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
218 213
219 /** 214 /*
220 * mapping between entry priority to client number (0,1,2 -debug and 215 * mapping between entry priority to client number (0,1,2 -debug and
221 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 216 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
222 * 3bits client num. 217 * 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
225 */ 220 */
226 221
227 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); 222 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
228 /** 223 /*
229	 * Bitmap of 5 bits in length. Each bit specifies whether the entry behaves 224	 * Bitmap of 5 bits in length. Each bit specifies whether the entry behaves
230 * as strict. Bits 0,1,2 - debug and management entries, 3 - 225 * as strict. Bits 0,1,2 - debug and management entries, 3 -
231 * COS0 entry, 4 - COS1 entry. 226 * COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
237 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 232 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
238 /* defines which entries (clients) are subjected to WFQ arbitration */ 233 /* defines which entries (clients) are subjected to WFQ arbitration */
239 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 234 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
240 /** 235 /*
241	 * For strict priority entries, this defines the number of consecutive 236	 * For strict priority entries, this defines the number of consecutive
242 * slots for the highest priority. 237 * slots for the highest priority.
243 */ 238 */
244 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 239 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
245 /** 240 /*
246 * mapping between the CREDIT_WEIGHT registers and actual client 241 * mapping between the CREDIT_WEIGHT registers and actual client
247 * numbers 242 * numbers
248 */ 243 */
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
255 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); 250 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
256 /* ETS mode disable */ 251 /* ETS mode disable */
257 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); 252 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
258 /** 253 /*
259	 * If ETS mode is enabled (there is no strict priority), this defines a WFQ 254	 * If ETS mode is enabled (there is no strict priority), this defines a WFQ
260 * weight for COS0/COS1. 255 * weight for COS0/COS1.
261 */ 256 */
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
268 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 263 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
269} 264}
270 265
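The 0x4688 written to NIG_REG_P0_TX_ARB_PRIORITY_CLIENT above packs five 3-bit client numbers, one per arbitration priority, exactly as the comment describes. A quick standalone decode of that value:

#include <stdio.h>

/* Decode a NIG TX-arbiter priority-to-client map: 3 bits per priority. */
int main(void)
{
	unsigned int map = 0x4688;	/* value programmed by bnx2x_ets_disabled() */
	int pri;

	for (pri = 0; pri < 5; pri++)
		printf("PRI%d -> client %u\n", pri, (map >> (3 * pri)) & 0x7);
	/* prints clients 0,1,2 (debug/management), 3 (COS0), 4 (COS1) */
	return 0;
}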
271void bnx2x_ets_bw_limit_common(const struct link_params *params) 266static void bnx2x_ets_bw_limit_common(const struct link_params *params)
272{ 267{
273 /* ETS disabled configuration */ 268 /* ETS disabled configuration */
274 struct bnx2x *bp = params->bp; 269 struct bnx2x *bp = params->bp;
275 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); 270 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
276 /** 271 /*
277 * defines which entries (clients) are subjected to WFQ arbitration 272 * defines which entries (clients) are subjected to WFQ arbitration
278 * COS0 0x8 273 * COS0 0x8
279 * COS1 0x10 274 * COS1 0x10
280 */ 275 */
281 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); 276 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
282 /** 277 /*
283 * mapping between the ARB_CREDIT_WEIGHT registers and actual 278 * mapping between the ARB_CREDIT_WEIGHT registers and actual
284 * client numbers (WEIGHT_0 does not actually have to represent 279 * client numbers (WEIGHT_0 does not actually have to represent
285 * client 0) 280 * client 0)
286 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 281 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
287 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 282 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
288 */ 283 */
289 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); 284 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
290 285
291 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 286 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
298 293
299 /* Defines the number of consecutive slots for the strict priority */ 294 /* Defines the number of consecutive slots for the strict priority */
300 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 295 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
301 /** 296 /*
302	 * Bitmap of 5 bits in length. Each bit specifies whether the entry behaves 297	 * Bitmap of 5 bits in length. Each bit specifies whether the entry behaves
303 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 298 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
304 * entry, 4 - COS1 entry. 299 * entry, 4 - COS1 entry.
305	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT 300	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
306 * bit4 bit3 bit2 bit1 bit0 301 * bit4 bit3 bit2 bit1 bit0
307 * MCP and debug are strict 302 * MCP and debug are strict
308 */ 303 */
309 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 304 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
310 305
311 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ 306 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
329 if ((0 == total_bw) || 324 if ((0 == total_bw) ||
330 (0 == cos0_bw) || 325 (0 == cos0_bw) ||
331 (0 == cos1_bw)) { 326 (0 == cos1_bw)) {
332 DP(NETIF_MSG_LINK, 327 DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
333 "bnx2x_ets_bw_limit: Total BW can't be zero\n");
334 return; 328 return;
335 } 329 }
336 330
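bnx2x_ets_bw_limit() rejects a zero total or a zero per-COS bandwidth before any weights are programmed; the part of the function not shown in this hunk then has to split the arbiter's credit weight between COS0 and COS1 in proportion to the requested bandwidths. A hedged sketch of one such proportional split, using an assumed weight scale rather than the driver's constant:

#include <stdint.h>

#define TOTAL_CREDIT_WEIGHT	100	/* assumed scale, not the driver's constant */

/*
 * Split a weight budget between two classes proportionally to the
 * requested bandwidths. The guard mirrors the check in
 * bnx2x_ets_bw_limit(): all inputs must be non-zero.
 */
static int ets_split_weights(uint32_t cos0_bw, uint32_t cos1_bw,
			     uint32_t *w0, uint32_t *w1)
{
	uint32_t total = cos0_bw + cos1_bw;

	if (!total || !cos0_bw || !cos1_bw)
		return -1;	/* "Total BW can't be zero" */
	*w0 = (cos0_bw * TOTAL_CREDIT_WEIGHT) / total;
	*w1 = TOTAL_CREDIT_WEIGHT - *w0;
	return 0;
}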
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
355 u32 val = 0; 349 u32 val = 0;
356 350
357 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); 351 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
358 /** 352 /*
359	 * Bitmap of 5 bits in length. Each bit specifies whether the entry behaves 353	 * Bitmap of 5 bits in length. Each bit specifies whether the entry behaves
360 * as strict. Bits 0,1,2 - debug and management entries, 354 * as strict. Bits 0,1,2 - debug and management entries,
361 * 3 - COS0 entry, 4 - COS1 entry. 355 * 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
364 * MCP and debug are strict 358 * MCP and debug are strict
365 */ 359 */
366 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); 360 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
367 /** 361 /*
368	 * For strict priority entries, this defines the number of consecutive slots 362	 * For strict priority entries, this defines the number of consecutive slots
369 * for the highest priority. 363 * for the highest priority.
370 */ 364 */
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
377 /* Defines the number of consecutive slots for the strict priority */ 371 /* Defines the number of consecutive slots for the strict priority */
378 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); 372 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
379 373
380 /** 374 /*
381	 * mapping from entry priority to client number (0,1,2 - debug and 375	 * mapping from entry priority to client number (0,1,2 - debug and
382	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST) 376	 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
383	 * 3 bits per client number. 377	 * 3 bits per client number.
384 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 378 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
385 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 379 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
386 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 380 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
387 */ 381 */
388 val = (0 == strict_cos) ? 0x2318 : 0x22E0; 382 val = (0 == strict_cos) ? 0x2318 : 0x22E0;
389 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); 383 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
390 384
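The selection val = (0 == strict_cos) ? 0x2318 : 0x22E0 above uses the same 3-bit-per-priority packing as the arbiter map in bnx2x_ets_disabled(); the two constants differ only in which slots clients 3 (COS0) and 4 (COS1) occupy, matching the two mapping lines in the comment. A standalone decode of both values:

#include <stdio.h>

static void decode(const char *name, unsigned int map)
{
	int pri;

	printf("%s:", name);
	for (pri = 0; pri < 5; pri++)
		printf(" PRI%d=%u", pri, (map >> (3 * pri)) & 0x7);
	printf("\n");
}

int main(void)
{
	decode("strict_cos == 0 (0x2318)", 0x2318);	/* MCP, cos0, cos1, dbg1, dbg0 */
	decode("strict_cos != 0 (0x22E0)", 0x22E0);	/* MCP, cos1, cos0, dbg1, dbg0 */
	return 0;
}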
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
471/* MAC/PBF section */ 465/* MAC/PBF section */
472/******************************************************************/ 466/******************************************************************/
473static void bnx2x_emac_init(struct link_params *params, 467static void bnx2x_emac_init(struct link_params *params,
474 struct link_vars *vars) 468 struct link_vars *vars)
475{ 469{
476 /* reset and unreset the emac core */ 470 /* reset and unreset the emac core */
477 struct bnx2x *bp = params->bp; 471 struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
481 u16 timeout; 475 u16 timeout;
482 476
483 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 477 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
484 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 478 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
485 udelay(5); 479 udelay(5);
486 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 480 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
487 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 481 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
488 482
489 /* init emac - use read-modify-write */ 483 /* init emac - use read-modify-write */
490 /* self clear reset */ 484 /* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
515} 509}
516 510
517static u8 bnx2x_emac_enable(struct link_params *params, 511static u8 bnx2x_emac_enable(struct link_params *params,
518 struct link_vars *vars, u8 lb) 512 struct link_vars *vars, u8 lb)
519{ 513{
520 struct bnx2x *bp = params->bp; 514 struct bnx2x *bp = params->bp;
521 u8 port = params->port; 515 u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
527 /* enable emac and not bmac */ 521 /* enable emac and not bmac */
528 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); 522 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
529 523
530 /* for paladium */
531 if (CHIP_REV_IS_EMUL(bp)) {
532 /* Use lane 1 (of lanes 0-3) */
533 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
534 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
535 port*4, 1);
536 }
537 /* for fpga */
538 else
539
540 if (CHIP_REV_IS_FPGA(bp)) {
541 /* Use lane 1 (of lanes 0-3) */
542 DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
543
544 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
545 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
546 0);
547 } else
548 /* ASIC */ 524 /* ASIC */
549 if (vars->phy_flags & PHY_XGXS_FLAG) { 525 if (vars->phy_flags & PHY_XGXS_FLAG) {
550 u32 ser_lane = ((params->lane_config & 526 u32 ser_lane = ((params->lane_config &
551 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 527 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
552 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 528 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
553 529
554 DP(NETIF_MSG_LINK, "XGXS\n"); 530 DP(NETIF_MSG_LINK, "XGXS\n");
555 /* select the master lanes (out of 0-3) */ 531 /* select the master lanes (out of 0-3) */
556 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + 532 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
557 port*4, ser_lane);
558 /* select XGXS */ 533 /* select XGXS */
559 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 534 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
560 port*4, 1);
561 535
562 } else { /* SerDes */ 536 } else { /* SerDes */
563 DP(NETIF_MSG_LINK, "SerDes\n"); 537 DP(NETIF_MSG_LINK, "SerDes\n");
564 /* select SerDes */ 538 /* select SerDes */
565 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + 539 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
566 port*4, 0);
567 } 540 }
568 541
569 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 542 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
570 EMAC_RX_MODE_RESET); 543 EMAC_RX_MODE_RESET);
571 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 544 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
572 EMAC_TX_MODE_RESET); 545 EMAC_TX_MODE_RESET);
573 546
574 if (CHIP_REV_IS_SLOW(bp)) { 547 if (CHIP_REV_IS_SLOW(bp)) {
575 /* config GMII mode */ 548 /* config GMII mode */
576 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 549 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
577 EMAC_WR(bp, EMAC_REG_EMAC_MODE, 550 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
578 (val | EMAC_MODE_PORT_GMII));
579 } else { /* ASIC */ 551 } else { /* ASIC */
580 /* pause enable/disable */ 552 /* pause enable/disable */
581 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 553 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
605 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 577 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
606 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 578 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
607 579
608 /** 580 /*
609 * Setting this bit causes MAC control frames (except for pause 581 * Setting this bit causes MAC control frames (except for pause
610 * frames) to be passed on for processing. This setting has no 582 * frames) to be passed on for processing. This setting has no
611	 * effect on the operation of the pause frames. This bit affects 583	 * effect on the operation of the pause frames. This bit affects
612 * all packets regardless of RX Parser packet sorting logic. 584 * all packets regardless of RX Parser packet sorting logic.
613 * Turn the PFC off to make sure we are in Xon state before 585 * Turn the PFC off to make sure we are in Xon state before
614 * enabling it. 586 * enabling it.
615 */ 587 */
616 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); 588 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
617 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { 589 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
618 DP(NETIF_MSG_LINK, "PFC is enabled\n"); 590 DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
666 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); 638 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
667 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); 639 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
668 640
669 if (CHIP_REV_IS_EMUL(bp)) { 641 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
670 /* take the BigMac out of reset */
671 REG_WR(bp,
672 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
673 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
674
675 /* enable access for bmac registers */
676 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
677 } else
678 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
679 642
680 vars->mac_type = MAC_TYPE_EMAC; 643 vars->mac_type = MAC_TYPE_EMAC;
681 return 0; 644 return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
731 val |= (1<<5); 694 val |= (1<<5);
732 wb_data[0] = val; 695 wb_data[0] = val;
733 wb_data[1] = 0; 696 wb_data[1] = 0;
734 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, 697 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
735 wb_data, 2);
736 udelay(30); 698 udelay(30);
737 699
738 /* Tx control */ 700 /* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
768 730
769 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); 731 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
770 732
771 /** 733 /*
772	 * Set Time (base unit is 512 bit times) between automatic 734	 * Set Time (base unit is 512 bit times) between automatic
773	 * re-sending of PP packets and enable automatic re-send of 735	 * re-sending of PP packets and enable automatic re-send of
774	 * Per-Priority Packet as long as pp_gen is asserted and 736	 * Per-Priority Packet as long as pp_gen is asserted and
775 * pp_disable is low. 737 * pp_disable is low.
776 */ 738 */
777 val = 0x8000; 739 val = 0x8000;
778 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) 740 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
779 val |= (1<<16); /* enable automatic re-send */ 741 val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
781 wb_data[0] = val; 743 wb_data[0] = val;
782 wb_data[1] = 0; 744 wb_data[1] = 0;
783 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, 745 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
784 wb_data, 2); 746 wb_data, 2);
785 747
786 /* mac control */ 748 /* mac control */
787 val = 0x3; /* Enable RX and TX */ 749 val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
795 757
796 wb_data[0] = val; 758 wb_data[0] = val;
797 wb_data[1] = 0; 759 wb_data[1] = 0;
798 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, 760 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
799 wb_data, 2);
800} 761}
801 762
802static void bnx2x_update_pfc_brb(struct link_params *params, 763static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
825 full_xon_th = 786 full_xon_th =
826 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; 787 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
827 } 788 }
828 /* The number of free blocks below which the pause signal to class 0 789 /*
829 of MAC #n is asserted. n=0,1 */ 790 * The number of free blocks below which the pause signal to class 0
791 * of MAC #n is asserted. n=0,1
792 */
830 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th); 793 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
831 /* The number of free blocks above which the pause signal to class 0 794 /*
832 of MAC #n is de-asserted. n=0,1 */ 795 * The number of free blocks above which the pause signal to class 0
796 * of MAC #n is de-asserted. n=0,1
797 */
833 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th); 798 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
834 /* The number of free blocks below which the full signal to class 0 799 /*
835 of MAC #n is asserted. n=0,1 */ 800 * The number of free blocks below which the full signal to class 0
801 * of MAC #n is asserted. n=0,1
802 */
836 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th); 803 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
837 /* The number of free blocks above which the full signal to class 0 804 /*
838 of MAC #n is de-asserted. n=0,1 */ 805 * The number of free blocks above which the full signal to class 0
806 * of MAC #n is de-asserted. n=0,1
807 */
839 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th); 808 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
840 809
841 if (set_pfc && pfc_params) { 810 if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
859 full_xon_th = 828 full_xon_th =
860 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; 829 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
861 } 830 }
862 /** 831 /*
863 * The number of free blocks below which the pause signal to 832 * The number of free blocks below which the pause signal to
864 * class 1 of MAC #n is asserted. n=0,1 833 * class 1 of MAC #n is asserted. n=0,1
865 **/ 834 */
866 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th); 835 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
867 /** 836 /*
868 * The number of free blocks above which the pause signal to 837 * The number of free blocks above which the pause signal to
869 * class 1 of MAC #n is de-asserted. n=0,1 838 * class 1 of MAC #n is de-asserted. n=0,1
870 **/ 839 */
871 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th); 840 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
872 /** 841 /*
873 * The number of free blocks below which the full signal to 842 * The number of free blocks below which the full signal to
874 * class 1 of MAC #n is asserted. n=0,1 843 * class 1 of MAC #n is asserted. n=0,1
875 **/ 844 */
876 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th); 845 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
877 /** 846 /*
878 * The number of free blocks above which the full signal to 847 * The number of free blocks above which the full signal to
879 * class 1 of MAC #n is de-asserted. n=0,1 848 * class 1 of MAC #n is de-asserted. n=0,1
880 **/ 849 */
881 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th); 850 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
882 } 851 }
883} 852}
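bnx2x_update_pfc_brb() programs four BRB watermarks per traffic class (pause XOFF/XON and full XOFF/XON), choosing one set of constants when the class carries pauseable traffic and another when it does not. A compact sketch of that selection pattern; the structure and the numeric values below are illustrative stand-ins, not the PFC_BRB_* constants from the header:

#include <stdint.h>

/* Illustrative watermark sets; the real values are the PFC_BRB_* constants. */
struct brb_watermarks {
	uint32_t pause_xoff, pause_xon, full_xoff, full_xon;
};

static const struct brb_watermarks brb_pauseable = {
	.pause_xoff = 170, .pause_xon = 250, .full_xoff = 10, .full_xon = 50,
};

static const struct brb_watermarks brb_non_pauseable = {
	.pause_xoff = 140, .pause_xon = 190, .full_xoff = 90, .full_xon = 150,
};

/*
 * Per class, one watermark set or the other is picked depending on
 * whether the class carries pauseable traffic, and the four values are
 * then written to the corresponding BRB1 threshold registers.
 */
static const struct brb_watermarks *pick_watermarks(int class_is_pauseable)
{
	return class_is_pauseable ? &brb_pauseable : &brb_non_pauseable;
}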
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
896 FEATURE_CONFIG_PFC_ENABLED; 865 FEATURE_CONFIG_PFC_ENABLED;
897 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); 866 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
898 867
899 /** 868 /*
900 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set 869 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
901 * MAC control frames (that are not pause packets) 870 * MAC control frames (that are not pause packets)
902 * will be forwarded to the XCM. 871 * will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
904 xcm_mask = REG_RD(bp, 873 xcm_mask = REG_RD(bp,
905 port ? NIG_REG_LLH1_XCM_MASK : 874 port ? NIG_REG_LLH1_XCM_MASK :
906 NIG_REG_LLH0_XCM_MASK); 875 NIG_REG_LLH0_XCM_MASK);
907 /** 876 /*
908 * nig params will override non PFC params, since it's possible to 877 * nig params will override non PFC params, since it's possible to
909	 * transition from PFC to SAFC 878	 * transition from PFC to SAFC
910 */ 879 */
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
994 struct link_vars *vars, 963 struct link_vars *vars,
995 struct bnx2x_nig_brb_pfc_port_params *pfc_params) 964 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
996{ 965{
997 /** 966 /*
998 * The PFC and pause are orthogonal to one another, meaning when 967 * The PFC and pause are orthogonal to one another, meaning when
999	 * PFC is enabled, pause is disabled, and when PFC is 968	 * PFC is enabled, pause is disabled, and when PFC is
1000	 * disabled, pause is set according to the pause result. 969	 * disabled, pause is set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
1035 1004
1036static u8 bnx2x_bmac1_enable(struct link_params *params, 1005static u8 bnx2x_bmac1_enable(struct link_params *params,
1037 struct link_vars *vars, 1006 struct link_vars *vars,
1038 u8 is_lb) 1007 u8 is_lb)
1039{ 1008{
1040 struct bnx2x *bp = params->bp; 1009 struct bnx2x *bp = params->bp;
1041 u8 port = params->port; 1010 u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1049 /* XGXS control */ 1018 /* XGXS control */
1050 wb_data[0] = 0x3c; 1019 wb_data[0] = 0x3c;
1051 wb_data[1] = 0; 1020 wb_data[1] = 0;
1052 REG_WR_DMAE(bp, bmac_addr + 1021 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
1053 BIGMAC_REGISTER_BMAC_XGXS_CONTROL, 1022 wb_data, 2);
1054 wb_data, 2);
1055 1023
1056 /* tx MAC SA */ 1024 /* tx MAC SA */
1057 wb_data[0] = ((params->mac_addr[2] << 24) | 1025 wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1060 params->mac_addr[5]); 1028 params->mac_addr[5]);
1061 wb_data[1] = ((params->mac_addr[0] << 8) | 1029 wb_data[1] = ((params->mac_addr[0] << 8) |
1062 params->mac_addr[1]); 1030 params->mac_addr[1]);
1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, 1031 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
1064 wb_data, 2);
1065 1032
1066 /* mac control */ 1033 /* mac control */
1067 val = 0x3; 1034 val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
1071 } 1038 }
1072 wb_data[0] = val; 1039 wb_data[0] = val;
1073 wb_data[1] = 0; 1040 wb_data[1] = 0;
1074 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 1041 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
1075 wb_data, 2);
1076 1042
1077 /* set rx mtu */ 1043 /* set rx mtu */
1078 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1044 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1079 wb_data[1] = 0; 1045 wb_data[1] = 0;
1080 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, 1046 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
1081 wb_data, 2);
1082 1047
1083 bnx2x_update_pfc_bmac1(params, vars); 1048 bnx2x_update_pfc_bmac1(params, vars);
1084 1049
1085 /* set tx mtu */ 1050 /* set tx mtu */
1086 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1051 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1087 wb_data[1] = 0; 1052 wb_data[1] = 0;
1088 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, 1053 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
1089 wb_data, 2);
1090 1054
1091 /* set cnt max size */ 1055 /* set cnt max size */
1092 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1056 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1093 wb_data[1] = 0; 1057 wb_data[1] = 0;
1094 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, 1058 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1095 wb_data, 2);
1096 1059
1097 /* configure safc */ 1060 /* configure safc */
1098 wb_data[0] = 0x1000200; 1061 wb_data[0] = 0x1000200;
1099 wb_data[1] = 0; 1062 wb_data[1] = 0;
1100 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, 1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
1101 wb_data, 2); 1064 wb_data, 2);
1102 /* fix for emulation */
1103 if (CHIP_REV_IS_EMUL(bp)) {
1104 wb_data[0] = 0xf000;
1105 wb_data[1] = 0;
1106 REG_WR_DMAE(bp,
1107 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
1108 wb_data, 2);
1109 }
1110
1111 1065
1112 return 0; 1066 return 0;
1113} 1067}
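A pattern that repeats throughout the BMAC paths above: every register in the BigMAC block is written as a two-word buffer through the DMAE helper, with the value in word 0 and the upper word zeroed. A small sketch of that staging, with reg_wr_dmae() standing in for the driver's REG_WR_DMAE():

#include <stdint.h>

/* Stand-in for the driver's REG_WR_DMAE(): writes 'len' 32-bit words. */
extern void reg_wr_dmae(uint32_t addr, const uint32_t *buf, int len);

/*
 * BigMAC registers are wider than 32 bits, so each write stages a
 * two-word buffer: word 0 carries the value, word 1 the (here unused)
 * upper half.
 */
static void bmac_wr64(uint32_t bmac_addr, uint32_t reg_offset, uint32_t val)
{
	uint32_t wb_data[2];

	wb_data[0] = val;
	wb_data[1] = 0;
	reg_wr_dmae(bmac_addr + reg_offset, wb_data, 2);
}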
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1126 1080
1127 wb_data[0] = 0; 1081 wb_data[0] = 0;
1128 wb_data[1] = 0; 1082 wb_data[1] = 0;
1129 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, 1083 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
1130 wb_data, 2);
1131 udelay(30); 1084 udelay(30);
1132 1085
1133 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ 1086 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
1134 wb_data[0] = 0x3c; 1087 wb_data[0] = 0x3c;
1135 wb_data[1] = 0; 1088 wb_data[1] = 0;
1136 REG_WR_DMAE(bp, bmac_addr + 1089 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
1137 BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, 1090 wb_data, 2);
1138 wb_data, 2);
1139 1091
1140 udelay(30); 1092 udelay(30);
1141 1093
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1147 wb_data[1] = ((params->mac_addr[0] << 8) | 1099 wb_data[1] = ((params->mac_addr[0] << 8) |
1148 params->mac_addr[1]); 1100 params->mac_addr[1]);
1149 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, 1101 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
1150 wb_data, 2); 1102 wb_data, 2);
1151 1103
1152 udelay(30); 1104 udelay(30);
1153 1105
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
1155 wb_data[0] = 0x1000200; 1107 wb_data[0] = 0x1000200;
1156 wb_data[1] = 0; 1108 wb_data[1] = 0;
1157 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, 1109 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
1158 wb_data, 2); 1110 wb_data, 2);
1159 udelay(30); 1111 udelay(30);
1160 1112
1161 /* set rx mtu */ 1113 /* set rx mtu */
1162 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1114 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1163 wb_data[1] = 0; 1115 wb_data[1] = 0;
1164 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, 1116 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
1165 wb_data, 2);
1166 udelay(30); 1117 udelay(30);
1167 1118
1168 /* set tx mtu */ 1119 /* set tx mtu */
1169 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1120 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1170 wb_data[1] = 0; 1121 wb_data[1] = 0;
1171 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, 1122 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
1172 wb_data, 2);
1173 udelay(30); 1123 udelay(30);
1174 /* set cnt max size */ 1124 /* set cnt max size */
1175 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; 1125 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
1176 wb_data[1] = 0; 1126 wb_data[1] = 0;
1177 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, 1127 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
1178 wb_data, 2);
1179 udelay(30); 1128 udelay(30);
1180 bnx2x_update_pfc_bmac2(params, vars, is_lb); 1129 bnx2x_update_pfc_bmac2(params, vars, is_lb);
1181 1130
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
1191 u32 val; 1140 u32 val;
1192 /* reset and unreset the BigMac */ 1141 /* reset and unreset the BigMac */
1193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 1142 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1194 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1143 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1195 msleep(1); 1144 msleep(1);
1196 1145
1197 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 1146 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1198 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 1147 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1199 1148
1200 /* enable access for bmac registers */ 1149 /* enable access for bmac registers */
1201 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); 1150 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
1230 struct bnx2x *bp = params->bp; 1179 struct bnx2x *bp = params->bp;
1231 1180
1232 REG_WR(bp, params->shmem_base + 1181 REG_WR(bp, params->shmem_base +
1233 offsetof(struct shmem_region, 1182 offsetof(struct shmem_region,
1234 port_mb[params->port].link_status), 1183 port_mb[params->port].link_status), link_status);
1235 link_status);
1236} 1184}
1237 1185
1238static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 1186static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1239{ 1187{
1240 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 1188 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1241 NIG_REG_INGRESS_BMAC0_MEM; 1189 NIG_REG_INGRESS_BMAC0_MEM;
1242 u32 wb_data[2]; 1190 u32 wb_data[2];
1243 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 1191 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
1244 1192
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1250 if (CHIP_IS_E2(bp)) { 1198 if (CHIP_IS_E2(bp)) {
1251 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1199 /* Clear Rx Enable bit in BMAC_CONTROL register */
1252 REG_RD_DMAE(bp, bmac_addr + 1200 REG_RD_DMAE(bp, bmac_addr +
1253 BIGMAC2_REGISTER_BMAC_CONTROL, 1201 BIGMAC2_REGISTER_BMAC_CONTROL,
1254 wb_data, 2); 1202 wb_data, 2);
1255 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 1203 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
1256 REG_WR_DMAE(bp, bmac_addr + 1204 REG_WR_DMAE(bp, bmac_addr +
1257 BIGMAC2_REGISTER_BMAC_CONTROL, 1205 BIGMAC2_REGISTER_BMAC_CONTROL,
1258 wb_data, 2); 1206 wb_data, 2);
1259 } else { 1207 } else {
1260 /* Clear Rx Enable bit in BMAC_CONTROL register */ 1208 /* Clear Rx Enable bit in BMAC_CONTROL register */
1261 REG_RD_DMAE(bp, bmac_addr + 1209 REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
1271} 1219}
1272 1220
1273static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, 1221static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1274 u32 line_speed) 1222 u32 line_speed)
1275{ 1223{
1276 struct bnx2x *bp = params->bp; 1224 struct bnx2x *bp = params->bp;
1277 u8 port = params->port; 1225 u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1308 /* update threshold */ 1256 /* update threshold */
1309 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 1257 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
1310 /* update init credit */ 1258 /* update init credit */
1311 init_crd = 778; /* (800-18-4) */ 1259 init_crd = 778; /* (800-18-4) */
1312 1260
1313 } else { 1261 } else {
1314 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 1262 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
1353 return 0; 1301 return 0;
1354} 1302}
1355 1303
1304/*
1305 * get_emac_base
1306 *
1307 * @param bp - driver handle
1308 * @param mdc_mdio_access - requested MDC/MDIO access mode
1309 * @param port - port number
1310 *
1311 * @return u32
1312 *
1313 * This function selects the MDC/MDIO access (through emac0 or
1314 * emac1) depending on mdc_mdio_access, the port, and whether the
1315 * ports are swapped. Each phy has a default access mode, which
1316 * could also be overridden by nvram configuration. This
1317 * parameter, whether it is the default phy configuration or the
1318 * nvram override configuration, is passed here as mdc_mdio_access
1319 * and selects the emac_base for the CL45 read/write operations
1320 */
1356static u32 bnx2x_get_emac_base(struct bnx2x *bp, 1321static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1357 u32 mdc_mdio_access, u8 port) 1322 u32 mdc_mdio_access, u8 port)
1358{ 1323{
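The comment block above describes bnx2x_get_emac_base(): the MDC/MDIO pair used for clause-45 access can sit behind either EMAC0 or EMAC1, and which one is picked depends on the requested access mode, the port, and whether the ports are swapped. A hedged sketch of that selection idea; the mode names and swap handling here are illustrative, not the driver's exact nvram encoding:

#include <stdint.h>

/* Illustrative access modes; the driver takes these from nvram config. */
enum mdc_mdio_mode { MDIO_VIA_EMAC0, MDIO_VIA_EMAC1, MDIO_VIA_OWN_PORT };

static uint32_t pick_emac_base(enum mdc_mdio_mode mode, uint8_t port,
			       int ports_swapped,
			       uint32_t emac0_base, uint32_t emac1_base)
{
	switch (mode) {
	case MDIO_VIA_EMAC0:
		return emac0_base;
	case MDIO_VIA_EMAC1:
		return emac1_base;
	case MDIO_VIA_OWN_PORT:
	default:
		/* honour a swapped-port configuration */
		if (ports_swapped)
			port ^= 1;
		return port ? emac1_base : emac0_base;
	}
}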
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
1385 1350
1386} 1351}
1387 1352
1388u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, 1353/******************************************************************/
1389 u8 devad, u16 reg, u16 val) 1354/* CL45 access functions */
1355/******************************************************************/
1356static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1357 u8 devad, u16 reg, u16 val)
1390{ 1358{
1391 u32 tmp, saved_mode; 1359 u32 tmp, saved_mode;
1392 u8 i, rc = 0; 1360 u8 i, rc = 0;
1393 1361 /*
1394 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1362 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1395 * (a value of 49==0x31) and make sure that the AUTO poll is off 1363 * (a value of 49==0x31) and make sure that the AUTO poll is off
1396 */ 1364 */
1397 1365
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1414 for (i = 0; i < 50; i++) { 1382 for (i = 0; i < 50; i++) {
1415 udelay(10); 1383 udelay(10);
1416 1384
1417 tmp = REG_RD(bp, phy->mdio_ctrl + 1385 tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1418 EMAC_REG_EMAC_MDIO_COMM);
1419 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1386 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1420 udelay(5); 1387 udelay(5);
1421 break; 1388 break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1423 } 1390 }
1424 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1391 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1425 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1392 DP(NETIF_MSG_LINK, "write phy register failed\n");
1393 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1426 rc = -EFAULT; 1394 rc = -EFAULT;
1427 } else { 1395 } else {
1428 /* data */ 1396 /* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1435 udelay(10); 1403 udelay(10);
1436 1404
1437 tmp = REG_RD(bp, phy->mdio_ctrl + 1405 tmp = REG_RD(bp, phy->mdio_ctrl +
1438 EMAC_REG_EMAC_MDIO_COMM); 1406 EMAC_REG_EMAC_MDIO_COMM);
1439 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1407 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1440 udelay(5); 1408 udelay(5);
1441 break; 1409 break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1443 } 1411 }
1444 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1412 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1445 DP(NETIF_MSG_LINK, "write phy register failed\n"); 1413 DP(NETIF_MSG_LINK, "write phy register failed\n");
1414 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1446 rc = -EFAULT; 1415 rc = -EFAULT;
1447 } 1416 }
1448 } 1417 }
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
1453 return rc; 1422 return rc;
1454} 1423}
1455 1424
1456u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, 1425static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1457 u8 devad, u16 reg, u16 *ret_val) 1426 u8 devad, u16 reg, u16 *ret_val)
1458{ 1427{
1459 u32 val, saved_mode; 1428 u32 val, saved_mode;
1460 u16 i; 1429 u16 i;
1461 u8 rc = 0; 1430 u8 rc = 0;
1462 1431 /*
1463 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 1432 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1464 * (a value of 49==0x31) and make sure that the AUTO poll is off 1433 * (a value of 49==0x31) and make sure that the AUTO poll is off
1465 */ 1434 */
1466 1435
1467 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 1436 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1468 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | 1437 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
1469 EMAC_MDIO_MODE_CLOCK_CNT)); 1438 EMAC_MDIO_MODE_CLOCK_CNT));
1470 val |= (EMAC_MDIO_MODE_CLAUSE_45 | 1439 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1471 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 1440 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1472 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); 1441 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1490 } 1459 }
1491 if (val & EMAC_MDIO_COMM_START_BUSY) { 1460 if (val & EMAC_MDIO_COMM_START_BUSY) {
1492 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1461 DP(NETIF_MSG_LINK, "read phy register failed\n");
1493 1462 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1494 *ret_val = 0; 1463 *ret_val = 0;
1495 rc = -EFAULT; 1464 rc = -EFAULT;
1496 1465
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1505 udelay(10); 1474 udelay(10);
1506 1475
1507 val = REG_RD(bp, phy->mdio_ctrl + 1476 val = REG_RD(bp, phy->mdio_ctrl +
1508 EMAC_REG_EMAC_MDIO_COMM); 1477 EMAC_REG_EMAC_MDIO_COMM);
1509 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1478 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1510 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 1479 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
1511 break; 1480 break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
1513 } 1482 }
1514 if (val & EMAC_MDIO_COMM_START_BUSY) { 1483 if (val & EMAC_MDIO_COMM_START_BUSY) {
1515 DP(NETIF_MSG_LINK, "read phy register failed\n"); 1484 DP(NETIF_MSG_LINK, "read phy register failed\n");
1516 1485 netdev_err(bp->dev, "MDC/MDIO access timeout\n");
1517 *ret_val = 0; 1486 *ret_val = 0;
1518 rc = -EFAULT; 1487 rc = -EFAULT;
1519 } 1488 }
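Both bnx2x_cl45_write() and bnx2x_cl45_read() follow the same pattern after kicking off a transaction on EMAC_REG_EMAC_MDIO_COMM: poll the START_BUSY bit with short delays and, on the new code path, report a timeout through both the link debug print and netdev_err(). A standalone sketch of that poll loop; the bit position is illustrative and the register accessor and delay are stand-ins for REG_RD() and udelay():

#include <stdint.h>
#include <stdbool.h>

#define MDIO_COMM_START_BUSY	(1u << 29)	/* illustrative bit position */

/* Stand-ins for the driver's REG_RD() and udelay(). */
extern uint32_t mmio_read32(uint32_t addr);
extern void delay_us(unsigned int us);

/*
 * Poll an MDIO command register until the controller clears the
 * START_BUSY bit, mirroring the roughly 50 x 10us loop in the CL45
 * helpers. Returns true on completion, false on timeout.
 */
static bool mdio_wait_not_busy(uint32_t comm_reg_addr)
{
	int i;

	for (i = 0; i < 50; i++) {
		delay_us(10);
		if (!(mmio_read32(comm_reg_addr) & MDIO_COMM_START_BUSY)) {
			delay_us(5);
			return true;
		}
	}
	return false;	/* caller reports the MDC/MDIO access timeout */
}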
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
1529 u8 devad, u16 reg, u16 *ret_val) 1498 u8 devad, u16 reg, u16 *ret_val)
1530{ 1499{
1531 u8 phy_index; 1500 u8 phy_index;
1532 /** 1501 /*
1533 * Probe for the phy according to the given phy_addr, and execute 1502 * Probe for the phy according to the given phy_addr, and execute
1534 * the read request on it 1503 * the read request on it
1535 */ 1504 */
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
1547 u8 devad, u16 reg, u16 val) 1516 u8 devad, u16 reg, u16 val)
1548{ 1517{
1549 u8 phy_index; 1518 u8 phy_index;
1550 /** 1519 /*
1551 * Probe for the phy according to the given phy_addr, and execute 1520 * Probe for the phy according to the given phy_addr, and execute
1552 * the write request on it 1521 * the write request on it
1553 */ 1522 */
@@ -1576,16 +1545,15 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
1576 aer_val = 0x3800 + offset - 1; 1545 aer_val = 0x3800 + offset - 1;
1577 else 1546 else
1578 aer_val = 0x3800 + offset; 1547 aer_val = 0x3800 + offset;
1579 CL45_WR_OVER_CL22(bp, phy, 1548 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
1580 MDIO_REG_BANK_AER_BLOCK, 1549 MDIO_AER_BLOCK_AER_REG, aer_val);
1581 MDIO_AER_BLOCK_AER_REG, aer_val);
1582} 1550}
1583static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, 1551static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
1584 struct bnx2x_phy *phy) 1552 struct bnx2x_phy *phy)
1585{ 1553{
1586 CL45_WR_OVER_CL22(bp, phy, 1554 CL22_WR_OVER_CL45(bp, phy,
1587 MDIO_REG_BANK_AER_BLOCK, 1555 MDIO_REG_BANK_AER_BLOCK,
1588 MDIO_AER_BLOCK_AER_REG, 0x3800); 1556 MDIO_AER_BLOCK_AER_REG, 0x3800);
1589} 1557}
1590 1558
1591/******************************************************************/ 1559/******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
1621 1589
1622 bnx2x_set_serdes_access(bp, port); 1590 bnx2x_set_serdes_access(bp, port);
1623 1591
1624 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + 1592 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
1625 port*0x10, 1593 DEFAULT_PHY_DEV_ADDR);
1626 DEFAULT_PHY_DEV_ADDR);
1627} 1594}
1628 1595
1629static void bnx2x_xgxs_deassert(struct link_params *params) 1596static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
1641 udelay(500); 1608 udelay(500);
1642 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); 1609 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
1643 1610
1644 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + 1611 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
1645 port*0x18, 0);
1646 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 1612 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
1647 params->phy[INT_PHY].def_md_devad); 1613 params->phy[INT_PHY].def_md_devad);
1648} 1614}
1649 1615
1650 1616
1651void bnx2x_link_status_update(struct link_params *params, 1617void bnx2x_link_status_update(struct link_params *params,
1652 struct link_vars *vars) 1618 struct link_vars *vars)
1653{ 1619{
1654 struct bnx2x *bp = params->bp; 1620 struct bnx2x *bp = params->bp;
1655 u8 link_10g; 1621 u8 link_10g;
1656 u8 port = params->port; 1622 u8 port = params->port;
1657 1623
1658 vars->link_status = REG_RD(bp, params->shmem_base + 1624 vars->link_status = REG_RD(bp, params->shmem_base +
1659 offsetof(struct shmem_region, 1625 offsetof(struct shmem_region,
1660 port_mb[port].link_status)); 1626 port_mb[port].link_status));
1661 1627
1662 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); 1628 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
1663 1629
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
1667 vars->phy_link_up = 1; 1633 vars->phy_link_up = 1;
1668 vars->duplex = DUPLEX_FULL; 1634 vars->duplex = DUPLEX_FULL;
1669 switch (vars->link_status & 1635 switch (vars->link_status &
1670 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 1636 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
1671 case LINK_10THD: 1637 case LINK_10THD:
1672 vars->duplex = DUPLEX_HALF; 1638 vars->duplex = DUPLEX_HALF;
1673 /* fall thru */ 1639 /* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
1779{ 1745{
1780 struct bnx2x *bp = params->bp; 1746 struct bnx2x *bp = params->bp;
1781 u16 new_master_ln, ser_lane; 1747 u16 new_master_ln, ser_lane;
1782 ser_lane = ((params->lane_config & 1748 ser_lane = ((params->lane_config &
1783 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1749 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1784 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1750 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1785 1751
1786 /* set the master_ln for AN */ 1752 /* set the master_ln for AN */
1787 CL45_RD_OVER_CL22(bp, phy, 1753 CL22_RD_OVER_CL45(bp, phy,
1788 MDIO_REG_BANK_XGXS_BLOCK2, 1754 MDIO_REG_BANK_XGXS_BLOCK2,
1789 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1755 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1790 &new_master_ln); 1756 &new_master_ln);
1791 1757
1792 CL45_WR_OVER_CL22(bp, phy, 1758 CL22_WR_OVER_CL45(bp, phy,
1793 MDIO_REG_BANK_XGXS_BLOCK2 , 1759 MDIO_REG_BANK_XGXS_BLOCK2 ,
1794 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1760 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1795 (new_master_ln | ser_lane)); 1761 (new_master_ln | ser_lane));
1796} 1762}
1797 1763
1798static u8 bnx2x_reset_unicore(struct link_params *params, 1764static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1802 struct bnx2x *bp = params->bp; 1768 struct bnx2x *bp = params->bp;
1803 u16 mii_control; 1769 u16 mii_control;
1804 u16 i; 1770 u16 i;
1805 1771 CL22_RD_OVER_CL45(bp, phy,
1806 CL45_RD_OVER_CL22(bp, phy, 1772 MDIO_REG_BANK_COMBO_IEEE0,
1807 MDIO_REG_BANK_COMBO_IEEE0, 1773 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1808 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1809 1774
1810 /* reset the unicore */ 1775 /* reset the unicore */
1811 CL45_WR_OVER_CL22(bp, phy, 1776 CL22_WR_OVER_CL45(bp, phy,
1812 MDIO_REG_BANK_COMBO_IEEE0, 1777 MDIO_REG_BANK_COMBO_IEEE0,
1813 MDIO_COMBO_IEEE0_MII_CONTROL, 1778 MDIO_COMBO_IEEE0_MII_CONTROL,
1814 (mii_control | 1779 (mii_control |
1815 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 1780 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1816 if (set_serdes) 1781 if (set_serdes)
1817 bnx2x_set_serdes_access(bp, params->port); 1782 bnx2x_set_serdes_access(bp, params->port);
1818 1783
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1821 udelay(5); 1786 udelay(5);
1822 1787
1823 /* the reset erased the previous bank value */ 1788 /* the reset erased the previous bank value */
1824 CL45_RD_OVER_CL22(bp, phy, 1789 CL22_RD_OVER_CL45(bp, phy,
1825 MDIO_REG_BANK_COMBO_IEEE0, 1790 MDIO_REG_BANK_COMBO_IEEE0,
1826 MDIO_COMBO_IEEE0_MII_CONTROL, 1791 MDIO_COMBO_IEEE0_MII_CONTROL,
1827 &mii_control); 1792 &mii_control);
1828 1793
1829 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { 1794 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
1830 udelay(5); 1795 udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
1832 } 1797 }
1833 } 1798 }
1834 1799
1800	netdev_err(bp->dev,
1801		   "Warning: PHY was not initialized, Port %d\n",
1802		   params->port);
1835 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); 1803 DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
1836 return -EINVAL; 1804 return -EINVAL;
1837 1805
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
1841 struct bnx2x_phy *phy) 1809 struct bnx2x_phy *phy)
1842{ 1810{
1843 struct bnx2x *bp = params->bp; 1811 struct bnx2x *bp = params->bp;
1844 /* Each two bits represents a lane number: 1812 /*
1845	 No swap is 0123 => 0x1b no need to enable the swap */ 1813	 * Each two-bit field represents a lane number:
1814	 * No swap is 0123 => 0x1b, so there is no need to enable the swap
1815 */
1846 u16 ser_lane, rx_lane_swap, tx_lane_swap; 1816 u16 ser_lane, rx_lane_swap, tx_lane_swap;
1847 1817
1848 ser_lane = ((params->lane_config & 1818 ser_lane = ((params->lane_config &
1849 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 1819 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
1850 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1820 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
1851 rx_lane_swap = ((params->lane_config & 1821 rx_lane_swap = ((params->lane_config &
1852 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> 1822 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
1853 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); 1823 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
1854 tx_lane_swap = ((params->lane_config & 1824 tx_lane_swap = ((params->lane_config &
1855 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> 1825 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
1856 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); 1826 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1857 1827
1858 if (rx_lane_swap != 0x1b) { 1828 if (rx_lane_swap != 0x1b) {
1859 CL45_WR_OVER_CL22(bp, phy, 1829 CL22_WR_OVER_CL45(bp, phy,
1860 MDIO_REG_BANK_XGXS_BLOCK2, 1830 MDIO_REG_BANK_XGXS_BLOCK2,
1861 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 1831 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1862 (rx_lane_swap | 1832 (rx_lane_swap |
1863 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | 1833 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1864 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); 1834 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1865 } else { 1835 } else {
1866 CL45_WR_OVER_CL22(bp, phy, 1836 CL22_WR_OVER_CL45(bp, phy,
1867 MDIO_REG_BANK_XGXS_BLOCK2, 1837 MDIO_REG_BANK_XGXS_BLOCK2,
1868 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); 1838 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1869 } 1839 }
1870 1840
1871 if (tx_lane_swap != 0x1b) { 1841 if (tx_lane_swap != 0x1b) {
1872 CL45_WR_OVER_CL22(bp, phy, 1842 CL22_WR_OVER_CL45(bp, phy,
1873 MDIO_REG_BANK_XGXS_BLOCK2, 1843 MDIO_REG_BANK_XGXS_BLOCK2,
1874 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 1844 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1875 (tx_lane_swap | 1845 (tx_lane_swap |
1876 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); 1846 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1877 } else { 1847 } else {
1878 CL45_WR_OVER_CL22(bp, phy, 1848 CL22_WR_OVER_CL45(bp, phy,
1879 MDIO_REG_BANK_XGXS_BLOCK2, 1849 MDIO_REG_BANK_XGXS_BLOCK2,
1880 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); 1850 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1881 } 1851 }
1882} 1852}
1883 1853
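In bnx2x_set_swap_lanes() each lane-swap field packs four lane numbers, two bits apiece, and 0x1b is the identity mapping 0,1,2,3, which is why the swap is only enabled when the field differs from 0x1b. A quick decode of that encoding:

#include <stdio.h>

int main(void)
{
	unsigned int swap = 0x1b;	/* the "no swap" encoding checked by the driver */
	int pos;

	/* print the four 2-bit lane fields from the most-significant pair down */
	for (pos = 3; pos >= 0; pos--)
		printf("%u", (swap >> (2 * pos)) & 0x3);
	printf("\n");	/* prints 0123: the identity lane order */
	return 0;
}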
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
1886{ 1856{
1887 struct bnx2x *bp = params->bp; 1857 struct bnx2x *bp = params->bp;
1888 u16 control2; 1858 u16 control2;
1889 CL45_RD_OVER_CL22(bp, phy, 1859 CL22_RD_OVER_CL45(bp, phy,
1890 MDIO_REG_BANK_SERDES_DIGITAL, 1860 MDIO_REG_BANK_SERDES_DIGITAL,
1891 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1861 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1892 &control2); 1862 &control2);
1893 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1863 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1894 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1864 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1895 else 1865 else
1896 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1866 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1897 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", 1867 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1898 phy->speed_cap_mask, control2); 1868 phy->speed_cap_mask, control2);
1899 CL45_WR_OVER_CL22(bp, phy, 1869 CL22_WR_OVER_CL45(bp, phy,
1900 MDIO_REG_BANK_SERDES_DIGITAL, 1870 MDIO_REG_BANK_SERDES_DIGITAL,
1901 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1871 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1902 control2); 1872 control2);
1903 1873
1904 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 1874 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
1905 (phy->speed_cap_mask & 1875 (phy->speed_cap_mask &
1906 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 1876 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1907 DP(NETIF_MSG_LINK, "XGXS\n"); 1877 DP(NETIF_MSG_LINK, "XGXS\n");
1908 1878
1909 CL45_WR_OVER_CL22(bp, phy, 1879 CL22_WR_OVER_CL45(bp, phy,
1910 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1880 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1911 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 1881 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1912 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 1882 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1913 1883
1914 CL45_RD_OVER_CL22(bp, phy, 1884 CL22_RD_OVER_CL45(bp, phy,
1915 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1885 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1916 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1886 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1917 &control2); 1887 &control2);
1918 1888
1919 1889
1920 control2 |= 1890 control2 |=
1921 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 1891 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1922 1892
1923 CL45_WR_OVER_CL22(bp, phy, 1893 CL22_WR_OVER_CL45(bp, phy,
1924 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1894 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1925 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1895 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1926 control2); 1896 control2);
1927 1897
1928 /* Disable parallel detection of HiG */ 1898 /* Disable parallel detection of HiG */
1929 CL45_WR_OVER_CL22(bp, phy, 1899 CL22_WR_OVER_CL45(bp, phy,
1930 MDIO_REG_BANK_XGXS_BLOCK2, 1900 MDIO_REG_BANK_XGXS_BLOCK2,
1931 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, 1901 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1932 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | 1902 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
1933 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); 1903 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
1934 } 1904 }
1935} 1905}
1936 1906
1937static void bnx2x_set_autoneg(struct bnx2x_phy *phy, 1907static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1938 struct link_params *params, 1908 struct link_params *params,
1939 struct link_vars *vars, 1909 struct link_vars *vars,
1940 u8 enable_cl73) 1910 u8 enable_cl73)
1941{ 1911{
1942 struct bnx2x *bp = params->bp; 1912 struct bnx2x *bp = params->bp;
1943 u16 reg_val; 1913 u16 reg_val;
1944 1914
1945 /* CL37 Autoneg */ 1915 /* CL37 Autoneg */
1946 CL45_RD_OVER_CL22(bp, phy, 1916 CL22_RD_OVER_CL45(bp, phy,
1947 MDIO_REG_BANK_COMBO_IEEE0, 1917 MDIO_REG_BANK_COMBO_IEEE0,
1948 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1918 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1949 1919
1950 /* CL37 Autoneg Enabled */ 1920 /* CL37 Autoneg Enabled */
1951 if (vars->line_speed == SPEED_AUTO_NEG) 1921 if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1954 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1924 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1955 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); 1925 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1956 1926
1957 CL45_WR_OVER_CL22(bp, phy, 1927 CL22_WR_OVER_CL45(bp, phy,
1958 MDIO_REG_BANK_COMBO_IEEE0, 1928 MDIO_REG_BANK_COMBO_IEEE0,
1959 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1929 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1960 1930
1961 /* Enable/Disable Autodetection */ 1931 /* Enable/Disable Autodetection */
1962 1932
1963 CL45_RD_OVER_CL22(bp, phy, 1933 CL22_RD_OVER_CL45(bp, phy,
1964 MDIO_REG_BANK_SERDES_DIGITAL, 1934 MDIO_REG_BANK_SERDES_DIGITAL,
1965 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1935 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1966 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | 1936 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
1967 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); 1937 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
1968 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; 1938 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1971 else 1941 else
1972 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1942 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1973 1943
1974 CL45_WR_OVER_CL22(bp, phy, 1944 CL22_WR_OVER_CL45(bp, phy,
1975 MDIO_REG_BANK_SERDES_DIGITAL, 1945 MDIO_REG_BANK_SERDES_DIGITAL,
1976 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); 1946 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1977 1947
1978 /* Enable TetonII and BAM autoneg */ 1948 /* Enable TetonII and BAM autoneg */
1979 CL45_RD_OVER_CL22(bp, phy, 1949 CL22_RD_OVER_CL45(bp, phy,
1980 MDIO_REG_BANK_BAM_NEXT_PAGE, 1950 MDIO_REG_BANK_BAM_NEXT_PAGE,
1981 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1951 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1982 &reg_val); 1952 &reg_val);
1983 if (vars->line_speed == SPEED_AUTO_NEG) { 1953 if (vars->line_speed == SPEED_AUTO_NEG) {
1984 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1954 /* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1989 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1959 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1990 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1960 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1991 } 1961 }
1992 CL45_WR_OVER_CL22(bp, phy, 1962 CL22_WR_OVER_CL45(bp, phy,
1993 MDIO_REG_BANK_BAM_NEXT_PAGE, 1963 MDIO_REG_BANK_BAM_NEXT_PAGE,
1994 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1964 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1995 reg_val); 1965 reg_val);
1996 1966
1997 if (enable_cl73) { 1967 if (enable_cl73) {
1998 /* Enable Cl73 FSM status bits */ 1968 /* Enable Cl73 FSM status bits */
1999 CL45_WR_OVER_CL22(bp, phy, 1969 CL22_WR_OVER_CL45(bp, phy,
2000 MDIO_REG_BANK_CL73_USERB0, 1970 MDIO_REG_BANK_CL73_USERB0,
2001 MDIO_CL73_USERB0_CL73_UCTRL, 1971 MDIO_CL73_USERB0_CL73_UCTRL,
2002 0xe); 1972 0xe);
2003 1973
2004 /* Enable BAM Station Manager*/ 1974 /* Enable BAM Station Manager*/
2005 CL45_WR_OVER_CL22(bp, phy, 1975 CL22_WR_OVER_CL45(bp, phy,
2006 MDIO_REG_BANK_CL73_USERB0, 1976 MDIO_REG_BANK_CL73_USERB0,
2007 MDIO_CL73_USERB0_CL73_BAM_CTRL1, 1977 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2008 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | 1978 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2010 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1980 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
2011 1981
2012 /* Advertise CL73 link speeds */ 1982 /* Advertise CL73 link speeds */
2013 CL45_RD_OVER_CL22(bp, phy, 1983 CL22_RD_OVER_CL45(bp, phy,
2014 MDIO_REG_BANK_CL73_IEEEB1, 1984 MDIO_REG_BANK_CL73_IEEEB1,
2015 MDIO_CL73_IEEEB1_AN_ADV2, 1985 MDIO_CL73_IEEEB1_AN_ADV2,
2016 &reg_val); 1986 &reg_val);
2017 if (phy->speed_cap_mask & 1987 if (phy->speed_cap_mask &
2018 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1988 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2019 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; 1989 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2021 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1991 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
2022 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; 1992 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2023 1993
2024 CL45_WR_OVER_CL22(bp, phy, 1994 CL22_WR_OVER_CL45(bp, phy,
2025 MDIO_REG_BANK_CL73_IEEEB1, 1995 MDIO_REG_BANK_CL73_IEEEB1,
2026 MDIO_CL73_IEEEB1_AN_ADV2, 1996 MDIO_CL73_IEEEB1_AN_ADV2,
2027 reg_val); 1997 reg_val);
2028 1998
2029 /* CL73 Autoneg Enabled */ 1999 /* CL73 Autoneg Enabled */
2030 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 2000 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
2032 } else /* CL73 Autoneg Disabled */ 2002 } else /* CL73 Autoneg Disabled */
2033 reg_val = 0; 2003 reg_val = 0;
2034 2004
2035 CL45_WR_OVER_CL22(bp, phy, 2005 CL22_WR_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_CL73_IEEEB0, 2006 MDIO_REG_BANK_CL73_IEEEB0,
2037 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 2007 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2038} 2008}
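
Editor's note on the recurring rename in these hunks: CL45_RD_OVER_CL22 / CL45_WR_OVER_CL22 become CL22_RD_OVER_CL45 / CL22_WR_OVER_CL45, apparently to name the direction correctly: the XGXS/SerDes core exposes clause-22-style registers grouped into banks, and the driver reaches them over the clause-45 MDIO bus by selecting a bank and then accessing the register inside it. Below is a rough, self-contained sketch of that two-step access shape; the bus_* helpers and register layout are toy stand-ins, not the driver's real interface.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the MDIO bus primitives; not the driver's interface. */
static uint16_t bus_bank;                /* currently selected bank      */
static uint16_t bus_regs[4][8];          /* toy register file per bank   */

static void bus_select_bank(uint16_t bank)        { bus_bank = bank & 3; }
static uint16_t bus_read(uint16_t reg)            { return bus_regs[bus_bank][reg & 7]; }
static void bus_write(uint16_t reg, uint16_t val) { bus_regs[bus_bank][reg & 7] = val; }

/*
 * Clause-22-style register access routed over the bus: select the bank
 * first, then read-modify-write the register inside it; the same shape
 * as the CL22_RD_OVER_CL45 / CL22_WR_OVER_CL45 pairs used above.
 */
static void cl22_rmw_over_bus(uint16_t bank, uint16_t reg,
			      uint16_t clear, uint16_t set)
{
	uint16_t val;

	bus_select_bank(bank);
	val = bus_read(reg);
	val &= ~clear;
	val |= set;
	bus_write(reg, val);
}

int main(void)
{
	/* e.g. clear an AN-enable-style bit in a MII-control-like register */
	cl22_rmw_over_bus(1, 0, 1u << 12, 0);
	printf("bank1/reg0 = 0x%04x\n", bus_regs[1][0]);
	return 0;
}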
2039 2009
2040/* program SerDes, forced speed */ 2010/* program SerDes, forced speed */
2041static void bnx2x_program_serdes(struct bnx2x_phy *phy, 2011static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2042 struct link_params *params, 2012 struct link_params *params,
2043 struct link_vars *vars) 2013 struct link_vars *vars)
2044{ 2014{
2045 struct bnx2x *bp = params->bp; 2015 struct bnx2x *bp = params->bp;
2046 u16 reg_val; 2016 u16 reg_val;
2047 2017
2048 /* program duplex, disable autoneg and sgmii*/ 2018 /* program duplex, disable autoneg and sgmii*/
2049 CL45_RD_OVER_CL22(bp, phy, 2019 CL22_RD_OVER_CL45(bp, phy,
2050 MDIO_REG_BANK_COMBO_IEEE0, 2020 MDIO_REG_BANK_COMBO_IEEE0,
2051 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 2021 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2052 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | 2022 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2053 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2023 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2054 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); 2024 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
2055 if (phy->req_duplex == DUPLEX_FULL) 2025 if (phy->req_duplex == DUPLEX_FULL)
2056 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2026 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2057 CL45_WR_OVER_CL22(bp, phy, 2027 CL22_WR_OVER_CL45(bp, phy,
2058 MDIO_REG_BANK_COMBO_IEEE0, 2028 MDIO_REG_BANK_COMBO_IEEE0,
2059 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 2029 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2060 2030
2061 /* program speed 2031 /*
2062 - needed only if the speed is greater than 1G (2.5G or 10G) */ 2032 * program speed
2063 CL45_RD_OVER_CL22(bp, phy, 2033 * - needed only if the speed is greater than 1G (2.5G or 10G)
2064 MDIO_REG_BANK_SERDES_DIGITAL, 2034 */
2065 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 2035 CL22_RD_OVER_CL45(bp, phy,
2036 MDIO_REG_BANK_SERDES_DIGITAL,
2037 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2066 /* clearing the speed value before setting the right speed */ 2038 /* clearing the speed value before setting the right speed */
2067 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); 2039 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
2068 2040
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
2083 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 2055 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
2084 } 2056 }
2085 2057
2086 CL45_WR_OVER_CL22(bp, phy, 2058 CL22_WR_OVER_CL45(bp, phy,
2087 MDIO_REG_BANK_SERDES_DIGITAL, 2059 MDIO_REG_BANK_SERDES_DIGITAL,
2088 MDIO_SERDES_DIGITAL_MISC1, reg_val); 2060 MDIO_SERDES_DIGITAL_MISC1, reg_val);
2089 2061
2090} 2062}
2091 2063
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
2102 val |= MDIO_OVER_1G_UP1_2_5G; 2074 val |= MDIO_OVER_1G_UP1_2_5G;
2103 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2075 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2104 val |= MDIO_OVER_1G_UP1_10G; 2076 val |= MDIO_OVER_1G_UP1_10G;
2105 CL45_WR_OVER_CL22(bp, phy, 2077 CL22_WR_OVER_CL45(bp, phy,
2106 MDIO_REG_BANK_OVER_1G, 2078 MDIO_REG_BANK_OVER_1G,
2107 MDIO_OVER_1G_UP1, val); 2079 MDIO_OVER_1G_UP1, val);
2108 2080
2109 CL45_WR_OVER_CL22(bp, phy, 2081 CL22_WR_OVER_CL45(bp, phy,
2110 MDIO_REG_BANK_OVER_1G, 2082 MDIO_REG_BANK_OVER_1G,
2111 MDIO_OVER_1G_UP3, 0x400); 2083 MDIO_OVER_1G_UP3, 0x400);
2112} 2084}
2113 2085
2114static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, 2086static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2116{ 2088{
2117 struct bnx2x *bp = params->bp; 2089 struct bnx2x *bp = params->bp;
2118 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 2090 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2119 /* resolve pause mode and advertisement 2091 /*
2120 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 2092 * Resolve pause mode and advertisement.
2093 * Please refer to Table 28B-3 of the 802.3ab-1999 spec
2094 */
2121 2095
2122 switch (phy->req_flow_ctrl) { 2096 switch (phy->req_flow_ctrl) {
2123 case BNX2X_FLOW_CTRL_AUTO: 2097 case BNX2X_FLOW_CTRL_AUTO:
2124 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 2098 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
2125 *ieee_fc |= 2099 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2126 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 2100 else
2127 } else {
2128 *ieee_fc |= 2101 *ieee_fc |=
2129 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 2102 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2130 }
2131 break; 2103 break;
2132 case BNX2X_FLOW_CTRL_TX: 2104 case BNX2X_FLOW_CTRL_TX:
2133 *ieee_fc |= 2105 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2134 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2135 break; 2106 break;
2136 2107
2137 case BNX2X_FLOW_CTRL_RX: 2108 case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
2149 2120
2150static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, 2121static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
2151 struct link_params *params, 2122 struct link_params *params,
2152 u16 ieee_fc) 2123 u16 ieee_fc)
2153{ 2124{
2154 struct bnx2x *bp = params->bp; 2125 struct bnx2x *bp = params->bp;
2155 u16 val; 2126 u16 val;
2156 /* for AN, we are always publishing full duplex */ 2127 /* for AN, we are always publishing full duplex */
2157 2128
2158 CL45_WR_OVER_CL22(bp, phy, 2129 CL22_WR_OVER_CL45(bp, phy,
2159 MDIO_REG_BANK_COMBO_IEEE0, 2130 MDIO_REG_BANK_COMBO_IEEE0,
2160 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 2131 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
2161 CL45_RD_OVER_CL22(bp, phy, 2132 CL22_RD_OVER_CL45(bp, phy,
2162 MDIO_REG_BANK_CL73_IEEEB1, 2133 MDIO_REG_BANK_CL73_IEEEB1,
2163 MDIO_CL73_IEEEB1_AN_ADV1, &val); 2134 MDIO_CL73_IEEEB1_AN_ADV1, &val);
2164 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; 2135 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
2165 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); 2136 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
2166 CL45_WR_OVER_CL22(bp, phy, 2137 CL22_WR_OVER_CL45(bp, phy,
2167 MDIO_REG_BANK_CL73_IEEEB1, 2138 MDIO_REG_BANK_CL73_IEEEB1,
2168 MDIO_CL73_IEEEB1_AN_ADV1, val); 2139 MDIO_CL73_IEEEB1_AN_ADV1, val);
2169} 2140}
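
Editor's note: bnx2x_set_ieee_aneg_advertisment() writes the same pause advertisement twice, once to the CL37 combo AUTO_NEG_ADV register as-is and once merged into the CL73 AN_ADV1 register via (ieee_fc << 3) & PAUSE_MASK. Judging from that shift, the CL73 pause/asym pair appears to sit three bit positions higher than the CL37 one. A minimal sketch of keeping the two encodings in sync; the exact bit positions are assumptions for illustration, not taken from the register definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed layouts for illustration only: CL37 pause bits at 8:7,
 * CL73 pause bits at 11:10, i.e. three positions higher.
 */
#define CL37_PAUSE_MASK	(3u << 7)
#define CL73_PAUSE_MASK	(3u << 10)

static uint16_t cl73_adv_from_cl37(uint16_t cl73_adv, uint16_t ieee_fc)
{
	cl73_adv &= ~CL73_PAUSE_MASK;		      /* drop old pause bits */
	cl73_adv |= (ieee_fc << 3) & CL73_PAUSE_MASK; /* realign CL37 bits   */
	return cl73_adv;
}

int main(void)
{
	uint16_t ieee_fc = CL37_PAUSE_MASK;	      /* advertise PAUSE+ASYM */
	printf("CL73 ADV1 = 0x%04x\n", cl73_adv_from_cl37(0, ieee_fc));
	return 0;
}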
2170 2141
2171static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, 2142static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
2179 /* Enable and restart BAM/CL37 aneg */ 2150 /* Enable and restart BAM/CL37 aneg */
2180 2151
2181 if (enable_cl73) { 2152 if (enable_cl73) {
2182 CL45_RD_OVER_CL22(bp, phy, 2153 CL22_RD_OVER_CL45(bp, phy,
2183 MDIO_REG_BANK_CL73_IEEEB0, 2154 MDIO_REG_BANK_CL73_IEEEB0,
2184 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2155 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2185 &mii_control); 2156 &mii_control);
2186 2157
2187 CL45_WR_OVER_CL22(bp, phy, 2158 CL22_WR_OVER_CL45(bp, phy,
2188 MDIO_REG_BANK_CL73_IEEEB0, 2159 MDIO_REG_BANK_CL73_IEEEB0,
2189 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2160 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2190 (mii_control | 2161 (mii_control |
2191 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | 2162 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2192 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); 2163 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2193 } else { 2164 } else {
2194 2165
2195 CL45_RD_OVER_CL22(bp, phy, 2166 CL22_RD_OVER_CL45(bp, phy,
2196 MDIO_REG_BANK_COMBO_IEEE0, 2167 MDIO_REG_BANK_COMBO_IEEE0,
2197 MDIO_COMBO_IEEE0_MII_CONTROL, 2168 MDIO_COMBO_IEEE0_MII_CONTROL,
2198 &mii_control); 2169 &mii_control);
2199 DP(NETIF_MSG_LINK, 2170 DP(NETIF_MSG_LINK,
2200 "bnx2x_restart_autoneg mii_control before = 0x%x\n", 2171 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
2201 mii_control); 2172 mii_control);
2202 CL45_WR_OVER_CL22(bp, phy, 2173 CL22_WR_OVER_CL45(bp, phy,
2203 MDIO_REG_BANK_COMBO_IEEE0, 2174 MDIO_REG_BANK_COMBO_IEEE0,
2204 MDIO_COMBO_IEEE0_MII_CONTROL, 2175 MDIO_COMBO_IEEE0_MII_CONTROL,
2205 (mii_control | 2176 (mii_control |
2206 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2177 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2207 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); 2178 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
2208 } 2179 }
2209} 2180}
2210 2181
2211static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, 2182static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2212 struct link_params *params, 2183 struct link_params *params,
2213 struct link_vars *vars) 2184 struct link_vars *vars)
2214{ 2185{
2215 struct bnx2x *bp = params->bp; 2186 struct bnx2x *bp = params->bp;
2216 u16 control1; 2187 u16 control1;
2217 2188
2218 /* in SGMII mode, the unicore is always slave */ 2189 /* in SGMII mode, the unicore is always slave */
2219 2190
2220 CL45_RD_OVER_CL22(bp, phy, 2191 CL22_RD_OVER_CL45(bp, phy,
2221 MDIO_REG_BANK_SERDES_DIGITAL, 2192 MDIO_REG_BANK_SERDES_DIGITAL,
2222 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2193 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2223 &control1); 2194 &control1);
2224 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; 2195 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
2225 /* set sgmii mode (and not fiber) */ 2196 /* set sgmii mode (and not fiber) */
2226 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 2197 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
2227 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 2198 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
2228 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 2199 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
2229 CL45_WR_OVER_CL22(bp, phy, 2200 CL22_WR_OVER_CL45(bp, phy,
2230 MDIO_REG_BANK_SERDES_DIGITAL, 2201 MDIO_REG_BANK_SERDES_DIGITAL,
2231 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 2202 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2232 control1); 2203 control1);
2233 2204
2234 /* if forced speed */ 2205 /* if forced speed */
2235 if (!(vars->line_speed == SPEED_AUTO_NEG)) { 2206 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
2236 /* set speed, disable autoneg */ 2207 /* set speed, disable autoneg */
2237 u16 mii_control; 2208 u16 mii_control;
2238 2209
2239 CL45_RD_OVER_CL22(bp, phy, 2210 CL22_RD_OVER_CL45(bp, phy,
2240 MDIO_REG_BANK_COMBO_IEEE0, 2211 MDIO_REG_BANK_COMBO_IEEE0,
2241 MDIO_COMBO_IEEE0_MII_CONTROL, 2212 MDIO_COMBO_IEEE0_MII_CONTROL,
2242 &mii_control); 2213 &mii_control);
2243 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 2214 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2244 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 2215 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
2245 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 2216 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2267 if (phy->req_duplex == DUPLEX_FULL) 2238 if (phy->req_duplex == DUPLEX_FULL)
2268 mii_control |= 2239 mii_control |=
2269 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 2240 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2270 CL45_WR_OVER_CL22(bp, phy, 2241 CL22_WR_OVER_CL45(bp, phy,
2271 MDIO_REG_BANK_COMBO_IEEE0, 2242 MDIO_REG_BANK_COMBO_IEEE0,
2272 MDIO_COMBO_IEEE0_MII_CONTROL, 2243 MDIO_COMBO_IEEE0_MII_CONTROL,
2273 mii_control); 2244 mii_control);
2274 2245
2275 } else { /* AN mode */ 2246 } else { /* AN mode */
2276 /* enable and restart AN */ 2247 /* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
2285 2256
2286static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 2257static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
2287{ /* LD LP */ 2258{ /* LD LP */
2288 switch (pause_result) { /* ASYM P ASYM P */ 2259 switch (pause_result) { /* ASYM P ASYM P */
2289 case 0xb: /* 1 0 1 1 */ 2260 case 0xb: /* 1 0 1 1 */
2290 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; 2261 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
2291 break; 2262 break;
2292 2263
2293 case 0xe: /* 1 1 1 0 */ 2264 case 0xe: /* 1 1 1 0 */
2294 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 2265 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
2295 break; 2266 break;
2296 2267
2297 case 0x5: /* 0 1 0 1 */ 2268 case 0x5: /* 0 1 0 1 */
2298 case 0x7: /* 0 1 1 1 */ 2269 case 0x7: /* 0 1 1 1 */
2299 case 0xd: /* 1 1 0 1 */ 2270 case 0xd: /* 1 1 0 1 */
2300 case 0xf: /* 1 1 1 1 */ 2271 case 0xf: /* 1 1 1 1 */
2301 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 2272 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
2302 break; 2273 break;
2303 2274
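
Editor's note: reading the column comments, the pause_result nibble packs the local device's ASYM/PAUSE advertisement into bits 3:2 and the link partner's into bits 1:0; the switch above is the driver's encoding of Table 28B-3 of IEEE 802.3. An equivalent boolean reading of the same table, as a standalone sketch (the fall-through default is assumed to mean no flow control, since that case sits outside this hunk):

#include <stdbool.h>
#include <stdio.h>

enum flow_ctrl { FC_NONE, FC_TX, FC_RX, FC_BOTH };

/* pause_result layout, as in the switch above:
 * bit 3 = local ASYM, bit 2 = local PAUSE,
 * bit 1 = partner ASYM, bit 0 = partner PAUSE.
 */
static enum flow_ctrl resolve_pause(unsigned int pause_result)
{
	bool ld_asym = pause_result & 0x8, ld_pause = pause_result & 0x4;
	bool lp_asym = pause_result & 0x2, lp_pause = pause_result & 0x1;

	if (ld_pause && lp_pause)
		return FC_BOTH;			/* 0x5, 0x7, 0xd, 0xf */
	if (ld_asym && !ld_pause && lp_asym && lp_pause)
		return FC_TX;			/* 0xb                */
	if (ld_asym && ld_pause && lp_asym && !lp_pause)
		return FC_RX;			/* 0xe                */
	return FC_NONE;				/* everything else    */
}

int main(void)
{
	unsigned int r;

	for (r = 0; r <= 0xf; r++)
		printf("pause_result 0x%x -> flow_ctrl %d\n", r, resolve_pause(r));
	return 0;
}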
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
2317 u16 pd_10g, status2_1000x; 2288 u16 pd_10g, status2_1000x;
2318 if (phy->req_line_speed != SPEED_AUTO_NEG) 2289 if (phy->req_line_speed != SPEED_AUTO_NEG)
2319 return 0; 2290 return 0;
2320 CL45_RD_OVER_CL22(bp, phy, 2291 CL22_RD_OVER_CL45(bp, phy,
2321 MDIO_REG_BANK_SERDES_DIGITAL, 2292 MDIO_REG_BANK_SERDES_DIGITAL,
2322 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2293 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2323 &status2_1000x); 2294 &status2_1000x);
2324 CL45_RD_OVER_CL22(bp, phy, 2295 CL22_RD_OVER_CL45(bp, phy,
2325 MDIO_REG_BANK_SERDES_DIGITAL, 2296 MDIO_REG_BANK_SERDES_DIGITAL,
2326 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 2297 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
2327 &status2_1000x); 2298 &status2_1000x);
2328 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { 2299 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
2329 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", 2300 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
2330 params->port); 2301 params->port);
2331 return 1; 2302 return 1;
2332 } 2303 }
2333 2304
2334 CL45_RD_OVER_CL22(bp, phy, 2305 CL22_RD_OVER_CL45(bp, phy,
2335 MDIO_REG_BANK_10G_PARALLEL_DETECT, 2306 MDIO_REG_BANK_10G_PARALLEL_DETECT,
2336 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 2307 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
2337 &pd_10g); 2308 &pd_10g);
2338 2309
2339 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { 2310 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
2340 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", 2311 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2373 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 2344 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
2374 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 2345 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
2375 2346
2376 CL45_RD_OVER_CL22(bp, phy, 2347 CL22_RD_OVER_CL45(bp, phy,
2377 MDIO_REG_BANK_CL73_IEEEB1, 2348 MDIO_REG_BANK_CL73_IEEEB1,
2378 MDIO_CL73_IEEEB1_AN_ADV1, 2349 MDIO_CL73_IEEEB1_AN_ADV1,
2379 &ld_pause); 2350 &ld_pause);
2380 CL45_RD_OVER_CL22(bp, phy, 2351 CL22_RD_OVER_CL45(bp, phy,
2381 MDIO_REG_BANK_CL73_IEEEB1, 2352 MDIO_REG_BANK_CL73_IEEEB1,
2382 MDIO_CL73_IEEEB1_AN_LP_ADV1, 2353 MDIO_CL73_IEEEB1_AN_LP_ADV1,
2383 &lp_pause); 2354 &lp_pause);
2384 pause_result = (ld_pause & 2355 pause_result = (ld_pause &
2385 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) 2356 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
2386 >> 8; 2357 >> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
2390 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", 2361 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
2391 pause_result); 2362 pause_result);
2392 } else { 2363 } else {
2393 CL45_RD_OVER_CL22(bp, phy, 2364 CL22_RD_OVER_CL45(bp, phy,
2394 MDIO_REG_BANK_COMBO_IEEE0, 2365 MDIO_REG_BANK_COMBO_IEEE0,
2395 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 2366 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
2396 &ld_pause); 2367 &ld_pause);
2397 CL45_RD_OVER_CL22(bp, phy, 2368 CL22_RD_OVER_CL45(bp, phy,
2398 MDIO_REG_BANK_COMBO_IEEE0, 2369 MDIO_REG_BANK_COMBO_IEEE0,
2399 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 2370 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
2400 &lp_pause); 2371 &lp_pause);
2401 pause_result = (ld_pause & 2372 pause_result = (ld_pause &
2402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 2373 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
2403 pause_result |= (lp_pause & 2374 pause_result |= (lp_pause &
2404 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 2375 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
2405 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", 2376 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
2406 pause_result); 2377 pause_result);
2407 } 2378 }
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2417 u16 rx_status, ustat_val, cl37_fsm_recieved; 2388 u16 rx_status, ustat_val, cl37_fsm_recieved;
2418 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 2389 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
2419 /* Step 1: Make sure signal is detected */ 2390 /* Step 1: Make sure signal is detected */
2420 CL45_RD_OVER_CL22(bp, phy, 2391 CL22_RD_OVER_CL45(bp, phy,
2421 MDIO_REG_BANK_RX0, 2392 MDIO_REG_BANK_RX0,
2422 MDIO_RX0_RX_STATUS, 2393 MDIO_RX0_RX_STATUS,
2423 &rx_status); 2394 &rx_status);
2424 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != 2395 if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
2425 (MDIO_RX0_RX_STATUS_SIGDET)) { 2396 (MDIO_RX0_RX_STATUS_SIGDET)) {
2426 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 2397 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
2427 "rx_status(0x80b0) = 0x%x\n", rx_status); 2398 "rx_status(0x80b0) = 0x%x\n", rx_status);
2428 CL45_WR_OVER_CL22(bp, phy, 2399 CL22_WR_OVER_CL45(bp, phy,
2429 MDIO_REG_BANK_CL73_IEEEB0, 2400 MDIO_REG_BANK_CL73_IEEEB0,
2430 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2401 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2431 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); 2402 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
2432 return; 2403 return;
2433 } 2404 }
2434 /* Step 2: Check CL73 state machine */ 2405 /* Step 2: Check CL73 state machine */
2435 CL45_RD_OVER_CL22(bp, phy, 2406 CL22_RD_OVER_CL45(bp, phy,
2436 MDIO_REG_BANK_CL73_USERB0, 2407 MDIO_REG_BANK_CL73_USERB0,
2437 MDIO_CL73_USERB0_CL73_USTAT1, 2408 MDIO_CL73_USERB0_CL73_USTAT1,
2438 &ustat_val); 2409 &ustat_val);
2439 if ((ustat_val & 2410 if ((ustat_val &
2440 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | 2411 (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
2441 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != 2412 MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2445 "ustat_val(0x8371) = 0x%x\n", ustat_val); 2416 "ustat_val(0x8371) = 0x%x\n", ustat_val);
2446 return; 2417 return;
2447 } 2418 }
2448 /* Step 3: Check CL37 Message Pages received to indicate LP 2419 /*
2449 supports only CL37 */ 2420 * Step 3: Check CL37 Message Pages received to indicate LP
2450 CL45_RD_OVER_CL22(bp, phy, 2421 * supports only CL37
2451 MDIO_REG_BANK_REMOTE_PHY, 2422 */
2452 MDIO_REMOTE_PHY_MISC_RX_STATUS, 2423 CL22_RD_OVER_CL45(bp, phy,
2453 &cl37_fsm_recieved); 2424 MDIO_REG_BANK_REMOTE_PHY,
2425 MDIO_REMOTE_PHY_MISC_RX_STATUS,
2426 &cl37_fsm_recieved);
2454 if ((cl37_fsm_recieved & 2427 if ((cl37_fsm_recieved &
2455 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | 2428 (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
2456 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != 2429 MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
2461 cl37_fsm_recieved); 2434 cl37_fsm_recieved);
2462 return; 2435 return;
2463 } 2436 }
2464 /* The combined cl37/cl73 fsm state information indicating that we are 2437 /*
2465 connected to a device which does not support cl73, but does support 2438 * The combined cl37/cl73 fsm state information indicating that
2466 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ 2439 * we are connected to a device which does not support cl73, but
2440 * does support cl37 BAM. In this case we disable cl73 and
2441 * restart cl37 auto-neg
2442 */
2443
2467 /* Disable CL73 */ 2444 /* Disable CL73 */
2468 CL45_WR_OVER_CL22(bp, phy, 2445 CL22_WR_OVER_CL45(bp, phy,
2469 MDIO_REG_BANK_CL73_IEEEB0, 2446 MDIO_REG_BANK_CL73_IEEEB0,
2470 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 2447 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2471 0); 2448 0);
2472 /* Restart CL37 autoneg */ 2449 /* Restart CL37 autoneg */
2473 bnx2x_restart_autoneg(phy, params, 0); 2450 bnx2x_restart_autoneg(phy, params, 0);
2474 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 2451 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
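
Editor's note: bnx2x_check_fallback_to_cl37() is effectively a three-step gate: confirm signal detect, confirm the CL73 state machine is stuck at the link/BAM37 check, and confirm the partner's CL37 message pages were received; only when all three hold does it drop CL73 and restart CL37 autoneg. A condensed sketch of that control flow, with hypothetical probes standing in for the register reads:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical status probes; in the driver each of these is a register
 * read through the bank accessors shown earlier in this file.
 */
static bool signal_detected(void)           { return true; }
static bool cl73_stuck_at_bam37_check(void) { return true; }
static bool cl37_msg_pages_received(void)   { return true; }

static void check_fallback_to_cl37(void)
{
	if (!signal_detected()) {
		/* Step 1 failed: re-enable CL73 and bail out. */
		printf("no signal, restoring CL73\n");
		return;
	}
	if (!cl73_stuck_at_bam37_check())
		return;		/* Step 2 failed: CL73 is progressing, leave it. */
	if (!cl37_msg_pages_received())
		return;		/* Step 3 failed: partner never spoke CL37 BAM.  */

	/* Partner supports only CL37 BAM: disable CL73, restart CL37 AN. */
	printf("disabling CL73, restarting CL37 autoneg\n");
}

int main(void)
{
	check_fallback_to_cl37();
	return 0;
}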
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
2493 struct link_vars *vars) 2470 struct link_vars *vars)
2494{ 2471{
2495 struct bnx2x *bp = params->bp; 2472 struct bnx2x *bp = params->bp;
2496 u16 new_line_speed , gp_status; 2473 u16 new_line_speed, gp_status;
2497 u8 rc = 0; 2474 u8 rc = 0;
2498 2475
2499 /* Read gp_status */ 2476 /* Read gp_status */
2500 CL45_RD_OVER_CL22(bp, phy, 2477 CL22_RD_OVER_CL45(bp, phy,
2501 MDIO_REG_BANK_GP_STATUS, 2478 MDIO_REG_BANK_GP_STATUS,
2502 MDIO_GP_STATUS_TOP_AN_STATUS1, 2479 MDIO_GP_STATUS_TOP_AN_STATUS1,
2503 &gp_status); 2480 &gp_status);
2504 2481
2505 if (phy->req_line_speed == SPEED_AUTO_NEG) 2482 if (phy->req_line_speed == SPEED_AUTO_NEG)
2506 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; 2483 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2637 u16 bank; 2614 u16 bank;
2638 2615
2639 /* read precomp */ 2616 /* read precomp */
2640 CL45_RD_OVER_CL22(bp, phy, 2617 CL22_RD_OVER_CL45(bp, phy,
2641 MDIO_REG_BANK_OVER_1G, 2618 MDIO_REG_BANK_OVER_1G,
2642 MDIO_OVER_1G_LP_UP2, &lp_up2); 2619 MDIO_OVER_1G_LP_UP2, &lp_up2);
2643 2620
2644 /* bits [10:7] at lp_up2, positioned at [15:12] */ 2621 /* bits [10:7] at lp_up2, positioned at [15:12] */
2645 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> 2622 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2651 2628
2652 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; 2629 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2653 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { 2630 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2654 CL45_RD_OVER_CL22(bp, phy, 2631 CL22_RD_OVER_CL45(bp, phy,
2655 bank, 2632 bank,
2656 MDIO_TX0_TX_DRIVER, &tx_driver); 2633 MDIO_TX0_TX_DRIVER, &tx_driver);
2657 2634
2658 /* replace tx_driver bits [15:12] */ 2635 /* replace tx_driver bits [15:12] */
2659 if (lp_up2 != 2636 if (lp_up2 !=
2660 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 2637 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2661 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 2638 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2662 tx_driver |= lp_up2; 2639 tx_driver |= lp_up2;
2663 CL45_WR_OVER_CL22(bp, phy, 2640 CL22_WR_OVER_CL45(bp, phy,
2664 bank, 2641 bank,
2665 MDIO_TX0_TX_DRIVER, tx_driver); 2642 MDIO_TX0_TX_DRIVER, tx_driver);
2666 } 2643 }
2667 } 2644 }
2668} 2645}
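
Editor's note: bnx2x_set_gmii_tx_driver() copies the link partner's requested pre-emphasis, advertised in LP_UP2 bits 10:7, into bits 15:12 of each lane's TX driver register, and only writes the register when the value actually changes. The bit move, reduced to a standalone sketch (field positions taken from the comments above, register I/O stubbed out):

#include <stdint.h>
#include <stdio.h>

#define LP_UP2_PREEMPH_MASK	(0xfu << 7)	/* partner request, bits 10:7  */
#define TX_DRV_PREEMPH_MASK	(0xfu << 12)	/* per-lane field,  bits 15:12 */

static uint16_t tx_driver_apply_lp_preemph(uint16_t tx_driver, uint16_t lp_up2)
{
	/* Re-position the partner's 4-bit request at bits 15:12. */
	uint16_t want = ((lp_up2 & LP_UP2_PREEMPH_MASK) >> 7) << 12;

	if (want == (tx_driver & TX_DRV_PREEMPH_MASK))
		return tx_driver;		/* unchanged: skip the write   */

	tx_driver &= ~TX_DRV_PREEMPH_MASK;
	tx_driver |= want;
	return tx_driver;
}

int main(void)
{
	uint16_t lp_up2 = 0x5u << 7;		/* partner asks for level 5    */

	printf("tx_driver = 0x%04x\n", tx_driver_apply_lp_preemph(0x1000, lp_up2));
	return 0;
}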
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
2676 2653
2677 DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); 2654 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2678 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + 2655 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
2679 EMAC_REG_EMAC_MODE, 2656 EMAC_REG_EMAC_MODE,
2680 (EMAC_MODE_25G_MODE | 2657 (EMAC_MODE_25G_MODE |
2681 EMAC_MODE_PORT_MII_10M | 2658 EMAC_MODE_PORT_MII_10M |
2682 EMAC_MODE_HALF_DUPLEX)); 2659 EMAC_MODE_HALF_DUPLEX));
2683 switch (vars->line_speed) { 2660 switch (vars->line_speed) {
2684 case SPEED_10: 2661 case SPEED_10:
2685 mode |= EMAC_MODE_PORT_MII_10M; 2662 mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
2707 if (vars->duplex == DUPLEX_HALF) 2684 if (vars->duplex == DUPLEX_HALF)
2708 mode |= EMAC_MODE_HALF_DUPLEX; 2685 mode |= EMAC_MODE_HALF_DUPLEX;
2709 bnx2x_bits_en(bp, 2686 bnx2x_bits_en(bp,
2710 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 2687 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2711 mode); 2688 mode);
2712 2689
2713 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 2690 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
2714 return 0; 2691 return 0;
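
Editor's note: bnx2x_emac_program() first clears the 25G / MII-10M / half-duplex bits in EMAC_REG_EMAC_MODE and then re-enables only what the negotiated speed and duplex call for. That clear-then-set shape, reduced to a standalone sketch; the bit values are placeholders and only the speed cases visible in this hunk are shown.

#include <stdint.h>
#include <stdio.h>

/* Placeholder mode bits; the real EMAC_MODE_* values differ. */
#define MODE_25G		(1u << 0)
#define MODE_MII_10M		(1u << 1)
#define MODE_HALF_DUPLEX	(1u << 2)

static uint32_t emac_mode_for(uint32_t mode, int speed_mbps, int full_duplex)
{
	/* Clear everything speed/duplex related before re-deciding. */
	mode &= ~(MODE_25G | MODE_MII_10M | MODE_HALF_DUPLEX);

	if (speed_mbps == 10)
		mode |= MODE_MII_10M;	/* other speeds omitted in this sketch */
	if (!full_duplex)
		mode |= MODE_HALF_DUPLEX;
	return mode;
}

int main(void)
{
	printf("mode = 0x%x\n", emac_mode_for(0, 10, 0));
	return 0;
}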
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2723 2700
2724 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; 2701 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
2725 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { 2702 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
2726 CL45_WR_OVER_CL22(bp, phy, 2703 CL22_WR_OVER_CL45(bp, phy,
2727 bank, 2704 bank,
2728 MDIO_RX0_RX_EQ_BOOST, 2705 MDIO_RX0_RX_EQ_BOOST,
2729 phy->rx_preemphasis[i]); 2706 phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2731 2708
2732 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; 2709 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
2733 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { 2710 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
2734 CL45_WR_OVER_CL22(bp, phy, 2711 CL22_WR_OVER_CL45(bp, phy,
2735 bank, 2712 bank,
2736 MDIO_TX0_TX_DRIVER, 2713 MDIO_TX0_TX_DRIVER,
2737 phy->tx_preemphasis[i]); 2714 phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2754 /* forced speed requested? */ 2731 /* forced speed requested? */
2755 if (vars->line_speed != SPEED_AUTO_NEG || 2732 if (vars->line_speed != SPEED_AUTO_NEG ||
2756 (SINGLE_MEDIA_DIRECT(params) && 2733 (SINGLE_MEDIA_DIRECT(params) &&
2757 params->loopback_mode == LOOPBACK_EXT)) { 2734 params->loopback_mode == LOOPBACK_EXT)) {
2758 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 2735 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2759 2736
2760 /* disable autoneg */ 2737 /* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2771 2748
2772 /* program duplex & pause advertisement (for aneg) */ 2749 /* program duplex & pause advertisement (for aneg) */
2773 bnx2x_set_ieee_aneg_advertisment(phy, params, 2750 bnx2x_set_ieee_aneg_advertisment(phy, params,
2774 vars->ieee_fc); 2751 vars->ieee_fc);
2775 2752
2776 /* enable autoneg */ 2753 /* enable autoneg */
2777 bnx2x_set_autoneg(phy, params, vars, enable_cl73); 2754 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2842} 2819}
2843 2820
2844static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, 2821static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2845 struct bnx2x_phy *phy) 2822 struct bnx2x_phy *phy,
2823 struct link_params *params)
2846{ 2824{
2847 u16 cnt, ctrl; 2825 u16 cnt, ctrl;
2848 /* Wait for soft reset to get cleared upto 1 sec */ 2826 /* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2853 break; 2831 break;
2854 msleep(1); 2832 msleep(1);
2855 } 2833 }
2834
2835 if (cnt == 1000)
2836 netdev_err(bp->dev, "Warning: PHY was not initialized,"
2837 " Port %d\n",
2838 params->port);
2856 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); 2839 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
2857 return cnt; 2840 return cnt;
2858} 2841}
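
Editor's note: the reworked bnx2x_wait_reset_complete() now takes link_params so it can name the port when warning that the soft-reset bit never cleared within the roughly one-second poll budget. The bounded-poll shape, as a standalone sketch with a stubbed register read (the reset-bit position is an assumption):

#include <stdio.h>

#define CTRL_SOFT_RESET	(1u << 15)	/* assumed position of the reset bit */

/* Hypothetical PHY control-register read; reports reset done on call 5. */
static unsigned int read_phy_ctrl(void)
{
	static int calls;

	return (++calls < 5) ? CTRL_SOFT_RESET : 0;
}

static unsigned int wait_reset_complete(int port)
{
	unsigned int cnt, ctrl = 0;

	for (cnt = 0; cnt < 1000; cnt++) {
		ctrl = read_phy_ctrl();
		if (!(ctrl & CTRL_SOFT_RESET))
			break;
		/* the driver sleeps 1 ms here (msleep(1)) */
	}
	if (cnt == 1000)
		fprintf(stderr, "Warning: PHY was not initialized, Port %d\n",
			port);
	printf("control reg 0x%x (after %u iterations)\n", ctrl, cnt);
	return cnt;
}

int main(void)
{
	wait_reset_complete(0);
	return 0;
}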
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
2863 u32 mask; 2846 u32 mask;
2864 struct bnx2x *bp = params->bp; 2847 struct bnx2x *bp = params->bp;
2865 2848
2866 /* setting the status to report on link up 2849 /* Setting the status to report on link up for either XGXS or SerDes */
2867 for either XGXS or SerDes */
2868
2869 if (params->switch_cfg == SWITCH_CFG_10G) { 2850 if (params->switch_cfg == SWITCH_CFG_10G) {
2870 mask = (NIG_MASK_XGXS0_LINK10G | 2851 mask = (NIG_MASK_XGXS0_LINK10G |
2871 NIG_MASK_XGXS0_LINK_STATUS); 2852 NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2908{ 2889{
2909 u32 latch_status = 0; 2890 u32 latch_status = 0;
2910 2891
2911 /** 2892 /*
2912 * Disable the MI INT ( external phy int ) by writing 1 to the 2893 * Disable the MI INT ( external phy int ) by writing 1 to the
2913 * status register. Link down indication is high-active-signal, 2894 * status register. Link down indication is high-active-signal,
2914 * so in this case we need to write the status to clear the XOR 2895 * so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2933 2914
2934 /* For all latched-signal=up : Re-Arm Latch signals */ 2915 /* For all latched-signal=up : Re-Arm Latch signals */
2935 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, 2916 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
2936 (latch_status & 0xfffe) | (latch_status & 1)); 2917 (latch_status & 0xfffe) | (latch_status & 1));
2937 } 2918 }
2938 /* For all latched-signal=up,Write original_signal to status */ 2919 /* For all latched-signal=up,Write original_signal to status */
2939} 2920}
2940 2921
2941static void bnx2x_link_int_ack(struct link_params *params, 2922static void bnx2x_link_int_ack(struct link_params *params,
2942 struct link_vars *vars, u8 is_10g) 2923 struct link_vars *vars, u8 is_10g)
2943{ 2924{
2944 struct bnx2x *bp = params->bp; 2925 struct bnx2x *bp = params->bp;
2945 u8 port = params->port; 2926 u8 port = params->port;
2946 2927
2947 /* first reset all status 2928 /*
2948 * we assume only one line will be change at a time */ 2929 * First reset all status we assume only one line will be
2930 * change at a time
2931 */
2949 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 2932 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2950 (NIG_STATUS_XGXS0_LINK10G | 2933 (NIG_STATUS_XGXS0_LINK10G |
2951 NIG_STATUS_XGXS0_LINK_STATUS | 2934 NIG_STATUS_XGXS0_LINK_STATUS |
2952 NIG_STATUS_SERDES0_LINK_STATUS)); 2935 NIG_STATUS_SERDES0_LINK_STATUS));
2953 if (vars->phy_link_up) { 2936 if (vars->phy_link_up) {
2954 if (is_10g) { 2937 if (is_10g) {
2955 /* Disable the 10G link interrupt 2938 /*
2956 * by writing 1 to the status register 2939 * Disable the 10G link interrupt by writing 1 to the
2940 * status register
2957 */ 2941 */
2958 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); 2942 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
2959 bnx2x_bits_en(bp, 2943 bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
2961 NIG_STATUS_XGXS0_LINK10G); 2945 NIG_STATUS_XGXS0_LINK10G);
2962 2946
2963 } else if (params->switch_cfg == SWITCH_CFG_10G) { 2947 } else if (params->switch_cfg == SWITCH_CFG_10G) {
2964 /* Disable the link interrupt 2948 /*
2965 * by writing 1 to the relevant lane 2949 * Disable the link interrupt by writing 1 to the
2966 * in the status register 2950 * relevant lane in the status register
2967 */ 2951 */
2968 u32 ser_lane = ((params->lane_config & 2952 u32 ser_lane = ((params->lane_config &
2969 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 2953 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
2978 2962
2979 } else { /* SerDes */ 2963 } else { /* SerDes */
2980 DP(NETIF_MSG_LINK, "SerDes phy link up\n"); 2964 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
2981 /* Disable the link interrupt 2965 /*
2982 * by writing 1 to the status register 2966 * Disable the link interrupt by writing 1 to the status
2967 * register
2983 */ 2968 */
2984 bnx2x_bits_en(bp, 2969 bnx2x_bits_en(bp,
2985 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 2970 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3059 } 3044 }
3060 if ((params->num_phys == MAX_PHYS) && 3045 if ((params->num_phys == MAX_PHYS) &&
3061 (params->phy[EXT_PHY2].ver_addr != 0)) { 3046 (params->phy[EXT_PHY2].ver_addr != 0)) {
3062 spirom_ver = REG_RD(bp, 3047 spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
3063 params->phy[EXT_PHY2].ver_addr);
3064 if (params->phy[EXT_PHY2].format_fw_ver) { 3048 if (params->phy[EXT_PHY2].format_fw_ver) {
3065 *ver_p = '/'; 3049 *ver_p = '/';
3066 ver_p++; 3050 ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
3089 3073
3090 /* change the uni_phy_addr in the nig */ 3074 /* change the uni_phy_addr in the nig */
3091 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + 3075 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
3092 port*0x18)); 3076 port*0x18));
3093 3077
3094 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); 3078 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3095 3079
3096 bnx2x_cl45_write(bp, phy, 3080 bnx2x_cl45_write(bp, phy,
3097 5, 3081 5,
3098 (MDIO_REG_BANK_AER_BLOCK + 3082 (MDIO_REG_BANK_AER_BLOCK +
3099 (MDIO_AER_BLOCK_AER_REG & 0xf)), 3083 (MDIO_AER_BLOCK_AER_REG & 0xf)),
3100 0x2800); 3084 0x2800);
3101 3085
3102 bnx2x_cl45_write(bp, phy, 3086 bnx2x_cl45_write(bp, phy,
3103 5, 3087 5,
3104 (MDIO_REG_BANK_CL73_IEEEB0 + 3088 (MDIO_REG_BANK_CL73_IEEEB0 +
3105 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 3089 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
3106 0x6041); 3090 0x6041);
3107 msleep(200); 3091 msleep(200);
3108 /* set aer mmd back */ 3092 /* set aer mmd back */
3109 bnx2x_set_aer_mmd_xgxs(params, phy); 3093 bnx2x_set_aer_mmd_xgxs(params, phy);
3110 3094
3111 /* and md_devad */ 3095 /* and md_devad */
3112 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 3096 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3113 md_devad);
3114
3115 } else { 3097 } else {
3116 u16 mii_ctrl; 3098 u16 mii_ctrl;
3117 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); 3099 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,26 +3134,26 @@ u8 bnx2x_set_led(struct link_params *params,
3152 case LED_MODE_OFF: 3134 case LED_MODE_OFF:
3153 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 3135 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
3154 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3136 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
3155 SHARED_HW_CFG_LED_MAC1); 3137 SHARED_HW_CFG_LED_MAC1);
3156 3138
3157 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 3139 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3158 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); 3140 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
3159 break; 3141 break;
3160 3142
3161 case LED_MODE_OPER: 3143 case LED_MODE_OPER:
3162 /** 3144 /*
3163 * For all other phys, OPER mode is same as ON, so in case 3145 * For all other phys, OPER mode is same as ON, so in case
3164 * link is down, do nothing 3146 * link is down, do nothing
3165 **/ 3147 */
3166 if (!vars->link_up) 3148 if (!vars->link_up)
3167 break; 3149 break;
3168 case LED_MODE_ON: 3150 case LED_MODE_ON:
3169 if (params->phy[EXT_PHY1].type == 3151 if (params->phy[EXT_PHY1].type ==
3170 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 && 3152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
3171 CHIP_IS_E2(bp) && params->num_phys == 2) { 3153 CHIP_IS_E2(bp) && params->num_phys == 2) {
3172 /** 3154 /*
3173 * This is a work-around for E2+8727 Configurations 3155 * This is a work-around for E2+8727 Configurations
3174 */ 3156 */
3175 if (mode == LED_MODE_ON || 3157 if (mode == LED_MODE_ON ||
3176 speed == SPEED_10000){ 3158 speed == SPEED_10000){
3177 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 3159 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -3183,41 +3165,40 @@ u8 bnx2x_set_led(struct link_params *params,
3183 return rc; 3165 return rc;
3184 } 3166 }
3185 } else if (SINGLE_MEDIA_DIRECT(params)) { 3167 } else if (SINGLE_MEDIA_DIRECT(params)) {
3186 /** 3168 /*
3187 * This is a work-around for HW issue found when link 3169 * This is a work-around for HW issue found when link
3188 * is up in CL73 3170 * is up in CL73
3189 */ 3171 */
3190 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 3172 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
3191 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); 3173 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
3192 } else { 3174 } else {
3193 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3175 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
3194 hw_led_mode);
3195 } 3176 }
3196 3177
3197 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + 3178 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
3198 port*4, 0);
3199 /* Set blinking rate to ~15.9Hz */ 3179 /* Set blinking rate to ~15.9Hz */
3200 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, 3180 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
3201 LED_BLINK_RATE_VAL); 3181 LED_BLINK_RATE_VAL);
3202 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + 3182 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
3203 port*4, 1); 3183 port*4, 1);
3204 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 3184 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3205 EMAC_WR(bp, EMAC_REG_EMAC_LED, 3185 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
3206 (tmp & (~EMAC_LED_OVERRIDE)));
3207 3186
3208 if (CHIP_IS_E1(bp) && 3187 if (CHIP_IS_E1(bp) &&
3209 ((speed == SPEED_2500) || 3188 ((speed == SPEED_2500) ||
3210 (speed == SPEED_1000) || 3189 (speed == SPEED_1000) ||
3211 (speed == SPEED_100) || 3190 (speed == SPEED_100) ||
3212 (speed == SPEED_10))) { 3191 (speed == SPEED_10))) {
3213 /* On Everest 1 Ax chip versions for speeds less than 3192 /*
3214 10G LED scheme is different */ 3193 * On Everest 1 Ax chip versions for speeds less than
3194 * 10G LED scheme is different
3195 */
3215 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 3196 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
3216 + port*4, 1); 3197 + port*4, 1);
3217 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + 3198 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
3218 port*4, 0); 3199 port*4, 0);
3219 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + 3200 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
3220 port*4, 1); 3201 port*4, 1);
3221 } 3202 }
3222 break; 3203 break;
3223 3204
@@ -3231,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
3231 3212
3232} 3213}
3233 3214
3234/** 3215/*
3235 * This function comes to reflect the actual link state read DIRECTLY from the 3216 * This function comes to reflect the actual link state read DIRECTLY from the
3236 * HW 3217 * HW
3237 */ 3218 */
@@ -3243,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
3243 u8 ext_phy_link_up = 0, serdes_phy_type; 3224 u8 ext_phy_link_up = 0, serdes_phy_type;
3244 struct link_vars temp_vars; 3225 struct link_vars temp_vars;
3245 3226
3246 CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY], 3227 CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
3247 MDIO_REG_BANK_GP_STATUS, 3228 MDIO_REG_BANK_GP_STATUS,
3248 MDIO_GP_STATUS_TOP_AN_STATUS1, 3229 MDIO_GP_STATUS_TOP_AN_STATUS1,
3249 &gp_status); 3230 &gp_status);
3250 /* link is up only if both local phy and external phy are up */ 3231 /* link is up only if both local phy and external phy are up */
3251 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) 3232 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
3252 return -ESRCH; 3233 return -ESRCH;
@@ -3290,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3290 u8 rc = 0; 3271 u8 rc = 0;
3291 u8 phy_index, non_ext_phy; 3272 u8 phy_index, non_ext_phy;
3292 struct bnx2x *bp = params->bp; 3273 struct bnx2x *bp = params->bp;
3293 /** 3274 /*
3294 * In case of external phy existence, the line speed would be the 3275 * In case of external phy existence, the line speed would be the
3295 * line speed linked up by the external phy. In case it is direct 3276 * line speed linked up by the external phy. In case it is direct
3296 * only, then the line_speed during initialization will be 3277 * only, then the line_speed during initialization will be
3297 * equal to the req_line_speed 3278 * equal to the req_line_speed
3298 */ 3279 */
3299 vars->line_speed = params->phy[INT_PHY].req_line_speed; 3280 vars->line_speed = params->phy[INT_PHY].req_line_speed;
3300 3281
3301 /** 3282 /*
3302 * Initialize the internal phy in case this is a direct board 3283 * Initialize the internal phy in case this is a direct board
3303 * (no external phys), or this board has external phy which requires 3284 * (no external phys), or this board has external phy which requires
3304 * to first. 3285 * to first.
@@ -3326,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3326 if (!non_ext_phy) 3307 if (!non_ext_phy)
3327 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 3308 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3328 phy_index++) { 3309 phy_index++) {
3329 /** 3310 /*
3330 * No need to initialize second phy in case of first 3311 * No need to initialize second phy in case of first
3331 * phy only selection. In case of second phy, we do 3312 * phy only selection. In case of second phy, we do
3332 * need to initialize the first phy, since they are 3313 * need to initialize the first phy, since they are
3333 * connected. 3314 * connected.
3334 **/ 3315 */
3335 if (phy_index == EXT_PHY2 && 3316 if (phy_index == EXT_PHY2 &&
3336 (bnx2x_phy_selection(params) == 3317 (bnx2x_phy_selection(params) ==
3337 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { 3318 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
3338 DP(NETIF_MSG_LINK, "Not initializing" 3319 DP(NETIF_MSG_LINK, "Ignoring second phy\n");
3339 "second phy\n");
3340 continue; 3320 continue;
3341 } 3321 }
3342 params->phy[phy_index].config_init( 3322 params->phy[phy_index].config_init(
@@ -3358,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
3358 struct link_params *params) 3338 struct link_params *params)
3359{ 3339{
3360 /* reset the SerDes/XGXS */ 3340 /* reset the SerDes/XGXS */
3361 REG_WR(params->bp, GRCBASE_MISC + 3341 REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3362 MISC_REGISTERS_RESET_REG_3_CLEAR, 3342 (0x1ff << (params->port*16)));
3363 (0x1ff << (params->port*16)));
3364} 3343}
3365 3344
3366static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, 3345static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3374,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
3374 else 3353 else
3375 gpio_port = params->port; 3354 gpio_port = params->port;
3376 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3355 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3377 MISC_REGISTERS_GPIO_OUTPUT_LOW, 3356 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3378 gpio_port); 3357 gpio_port);
3379 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 3358 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3380 MISC_REGISTERS_GPIO_OUTPUT_LOW, 3359 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3381 gpio_port); 3360 gpio_port);
3382 DP(NETIF_MSG_LINK, "reset external PHY\n"); 3361 DP(NETIF_MSG_LINK, "reset external PHY\n");
3383} 3362}
3384 3363
@@ -3409,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
3409 3388
3410 /* reset BigMac */ 3389 /* reset BigMac */
3411 bnx2x_bmac_rx_disable(bp, params->port); 3390 bnx2x_bmac_rx_disable(bp, params->port);
3412 REG_WR(bp, GRCBASE_MISC + 3391 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3413 MISC_REGISTERS_RESET_REG_2_CLEAR, 3392 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3414 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3415 return 0; 3393 return 0;
3416} 3394}
3417 3395
@@ -3462,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
3462 msleep(20); 3440 msleep(20);
3463 return rc; 3441 return rc;
3464} 3442}
3465/** 3443/*
3466 * The bnx2x_link_update function should be called upon link 3444 * The bnx2x_link_update function should be called upon link
3467 * interrupt. 3445 * interrupt.
3468 * Link is considered up as follows: 3446 * Link is considered up as follows:
@@ -3501,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3501 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); 3479 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
3502 3480
3503 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + 3481 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
3504 port*0x18) > 0); 3482 port*0x18) > 0);
3505 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", 3483 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
3506 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), 3484 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3507 is_mi_int, 3485 is_mi_int,
3508 REG_RD(bp, 3486 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
3509 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
3510 3487
3511 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", 3488 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
3512 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 3489 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3515,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3515 /* disable emac */ 3492 /* disable emac */
3516 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 3493 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
3517 3494
3518 /** 3495 /*
3519 * Step 1: 3496 * Step 1:
3520 * Check external link change only for external phys, and apply 3497 * Check external link change only for external phys, and apply
3521 * priority selection between them in case the link on both phys 3498 * priority selection between them in case the link on both phys
3522 * is up. Note that the instead of the common vars, a temporary 3499 * is up. Note that the instead of the common vars, a temporary
3523 * vars argument is used since each phy may have different link/ 3500 * vars argument is used since each phy may have different link/
3524 * speed/duplex result 3501 * speed/duplex result
3525 */ 3502 */
3526 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 3503 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
3527 phy_index++) { 3504 phy_index++) {
3528 struct bnx2x_phy *phy = &params->phy[phy_index]; 3505 struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3547,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3547 switch (bnx2x_phy_selection(params)) { 3524 switch (bnx2x_phy_selection(params)) {
3548 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 3525 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3549 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 3526 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3550 /** 3527 /*
3551 * In this option, the first PHY makes sure to pass the 3528 * In this option, the first PHY makes sure to pass the
3552 * traffic through itself only. 3529 * traffic through itself only.
3553 * Its not clear how to reset the link on the second phy 3530 * Its not clear how to reset the link on the second phy
3554 **/ 3531 */
3555 active_external_phy = EXT_PHY1; 3532 active_external_phy = EXT_PHY1;
3556 break; 3533 break;
3557 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 3534 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3558 /** 3535 /*
3559 * In this option, the first PHY makes sure to pass the 3536 * In this option, the first PHY makes sure to pass the
3560 * traffic through the second PHY. 3537 * traffic through the second PHY.
3561 **/ 3538 */
3562 active_external_phy = EXT_PHY2; 3539 active_external_phy = EXT_PHY2;
3563 break; 3540 break;
3564 default: 3541 default:
3565 /** 3542 /*
3566 * Link indication on both PHYs with the following cases 3543 * Link indication on both PHYs with the following cases
3567 * is invalid: 3544 * is invalid:
3568 * - FIRST_PHY means that second phy wasn't initialized, 3545 * - FIRST_PHY means that second phy wasn't initialized,
@@ -3570,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3570 * - SECOND_PHY means that first phy should not be able 3547 * - SECOND_PHY means that first phy should not be able
3571 * to link up by itself (using configuration) 3548 * to link up by itself (using configuration)
3572 * - DEFAULT should be overriden during initialiazation 3549 * - DEFAULT should be overriden during initialiazation
3573 **/ 3550 */
3574 DP(NETIF_MSG_LINK, "Invalid link indication" 3551 DP(NETIF_MSG_LINK, "Invalid link indication"
3575 "mpc=0x%x. DISABLING LINK !!!\n", 3552 "mpc=0x%x. DISABLING LINK !!!\n",
3576 params->multi_phy_config); 3553 params->multi_phy_config);
@@ -3580,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3580 } 3557 }
3581 } 3558 }
3582 prev_line_speed = vars->line_speed; 3559 prev_line_speed = vars->line_speed;
3583 /** 3560 /*
3584 * Step 2: 3561 * Step 2:
3585 * Read the status of the internal phy. In case of 3562 * Read the status of the internal phy. In case of
3586 * DIRECT_SINGLE_MEDIA board, this link is the external link, 3563 * DIRECT_SINGLE_MEDIA board, this link is the external link,
3587 * otherwise this is the link between the 577xx and the first 3564 * otherwise this is the link between the 577xx and the first
3588 * external phy 3565 * external phy
3589 */ 3566 */
3590 if (params->phy[INT_PHY].read_status) 3567 if (params->phy[INT_PHY].read_status)
3591 params->phy[INT_PHY].read_status( 3568 params->phy[INT_PHY].read_status(
3592 &params->phy[INT_PHY], 3569 &params->phy[INT_PHY],
3593 params, vars); 3570 params, vars);
3594 /** 3571 /*
3595 * The INT_PHY flow control reside in the vars. This include the 3572 * The INT_PHY flow control reside in the vars. This include the
3596 * case where the speed or flow control are not set to AUTO. 3573 * case where the speed or flow control are not set to AUTO.
3597 * Otherwise, the active external phy flow control result is set 3574 * Otherwise, the active external phy flow control result is set
@@ -3601,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3601 */ 3578 */
3602 if (active_external_phy > INT_PHY) { 3579 if (active_external_phy > INT_PHY) {
3603 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; 3580 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
3604 /** 3581 /*
3605 * Link speed is taken from the XGXS. AN and FC result from 3582 * Link speed is taken from the XGXS. AN and FC result from
3606 * the external phy. 3583 * the external phy.
3607 */ 3584 */
3608 vars->link_status |= phy_vars[active_external_phy].link_status; 3585 vars->link_status |= phy_vars[active_external_phy].link_status;
3609 3586
3610 /** 3587 /*
3611 * if active_external_phy is first PHY and link is up - disable 3588 * if active_external_phy is first PHY and link is up - disable
3612 * disable TX on second external PHY 3589 * disable TX on second external PHY
3613 */ 3590 */
@@ -3643,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3643 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," 3620 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
3644 " ext_phy_line_speed = %d\n", vars->flow_ctrl, 3621 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
3645 vars->link_status, ext_phy_line_speed); 3622 vars->link_status, ext_phy_line_speed);
3646 /** 3623 /*
3647 * Upon link speed change set the NIG into drain mode. Comes to 3624 * Upon link speed change set the NIG into drain mode. Comes to
3648 * deals with possible FIFO glitch due to clk change when speed 3625 * deals with possible FIFO glitch due to clk change when speed
3649 * is decreased without link down indicator 3626 * is decreased without link down indicator
@@ -3658,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3658 ext_phy_line_speed); 3635 ext_phy_line_speed);
3659 vars->phy_link_up = 0; 3636 vars->phy_link_up = 0;
3660 } else if (prev_line_speed != vars->line_speed) { 3637 } else if (prev_line_speed != vars->line_speed) {
3661 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE 3638 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
3662 + params->port*4, 0); 3639 0);
3663 msleep(1); 3640 msleep(1);
3664 } 3641 }
3665 } 3642 }
@@ -3674,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3674 3651
3675 bnx2x_link_int_ack(params, vars, link_10g); 3652 bnx2x_link_int_ack(params, vars, link_10g);
3676 3653
3677 /** 3654 /*
3678 * In case external phy link is up, and internal link is down 3655 * In case external phy link is up, and internal link is down
3679 * (not initialized yet probably after link initialization, it 3656 * (not initialized yet probably after link initialization, it
3680 * needs to be initialized. 3657 * needs to be initialized.
3681 * Note that after link down-up as result of cable plug, the xgxs 3658 * Note that after link down-up as result of cable plug, the xgxs
3682 * link would probably become up again without the need 3659 * link would probably become up again without the need
3683 * initialize it 3660 * initialize it
3684 */ 3661 */
3685 if (!(SINGLE_MEDIA_DIRECT(params))) { 3662 if (!(SINGLE_MEDIA_DIRECT(params))) {
3686 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d," 3663 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
3687 " init_preceding = %d\n", ext_phy_link_up, 3664 " init_preceding = %d\n", ext_phy_link_up,
@@ -3701,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3701 vars); 3678 vars);
3702 } 3679 }
3703 } 3680 }
3704 /** 3681 /*
3705 * Link is up only if both local phy and external phy (in case of 3682 * Link is up only if both local phy and external phy (in case of
3706 * non-direct board) are up 3683 * non-direct board) are up
3707 */ 3684 */
3708 vars->link_up = (vars->phy_link_up && 3685 vars->link_up = (vars->phy_link_up &&
3709 (ext_phy_link_up || 3686 (ext_phy_link_up ||
@@ -3724,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
3724void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) 3701void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
3725{ 3702{
3726 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3703 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3727 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 3704 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3728 msleep(1); 3705 msleep(1);
3729 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 3706 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3730 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 3707 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
3731} 3708}
3732 3709
3733static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, 3710static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3747,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
3747 u16 fw_ver1, fw_ver2; 3724 u16 fw_ver1, fw_ver2;
3748 3725
3749 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 3726 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
3750 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 3727 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
3751 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 3728 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
3752 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 3729 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
3753 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), 3730 bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
3754 phy->ver_addr); 3731 phy->ver_addr);
3755} 3732}
@@ -3770,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
3770 if ((vars->ieee_fc & 3747 if ((vars->ieee_fc &
3771 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 3748 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3772 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 3749 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3773 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 3750 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3774 } 3751 }
3775 if ((vars->ieee_fc & 3752 if ((vars->ieee_fc &
3776 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 3753 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3801,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3801 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 3778 else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
3802 ret = 1; 3779 ret = 1;
3803 bnx2x_cl45_read(bp, phy, 3780 bnx2x_cl45_read(bp, phy,
3804 MDIO_AN_DEVAD, 3781 MDIO_AN_DEVAD,
3805 MDIO_AN_REG_ADV_PAUSE, &ld_pause); 3782 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
3806 bnx2x_cl45_read(bp, phy, 3783 bnx2x_cl45_read(bp, phy,
3807 MDIO_AN_DEVAD, 3784 MDIO_AN_DEVAD,
3808 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); 3785 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
3809 pause_result = (ld_pause & 3786 pause_result = (ld_pause &
3810 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; 3787 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
3811 pause_result |= (lp_pause & 3788 pause_result |= (lp_pause &
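The two reads above collect the local and link-partner pause advertisements; bnx2x_pause_resolve() itself is outside this diff, so the following standalone C sketch only illustrates the usual IEEE 802.3 priority resolution that the packed pause_result feeds into (the bit names and helper are assumptions, not the driver's code):

#include <stdio.h>

/* Illustrative advertisement and result bits, local to this sketch. */
#define ADV_PAUSE_CAP	(1u << 0)
#define ADV_PAUSE_ASYM	(1u << 1)
#define FLOW_CTRL_TX	(1u << 0)
#define FLOW_CTRL_RX	(1u << 1)

static unsigned resolve_fc(unsigned lcl, unsigned rmt)
{
	if (lcl & rmt & ADV_PAUSE_CAP)
		return FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric pause */
	if (lcl & rmt & ADV_PAUSE_ASYM) {
		if (lcl & ADV_PAUSE_CAP)
			return FLOW_CTRL_RX;	/* we honour received pause */
		if (rmt & ADV_PAUSE_CAP)
			return FLOW_CTRL_TX;	/* we may send pause frames */
	}
	return 0;
}

int main(void)
{
	/* local symmetric, partner symmetric+asym -> both directions on */
	printf("0x%x\n", resolve_fc(ADV_PAUSE_CAP,
				    ADV_PAUSE_CAP | ADV_PAUSE_ASYM));
	return 0;
}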
@@ -3881,31 +3858,31 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3881 /* Boot port from external ROM */ 3858 /* Boot port from external ROM */
3882 /* EDC grst */ 3859 /* EDC grst */
3883 bnx2x_cl45_write(bp, phy, 3860 bnx2x_cl45_write(bp, phy,
3884 MDIO_PMA_DEVAD, 3861 MDIO_PMA_DEVAD,
3885 MDIO_PMA_REG_GEN_CTRL, 3862 MDIO_PMA_REG_GEN_CTRL,
3886 0x0001); 3863 0x0001);
3887 3864
3888 /* ucode reboot and rst */ 3865 /* ucode reboot and rst */
3889 bnx2x_cl45_write(bp, phy, 3866 bnx2x_cl45_write(bp, phy,
3890 MDIO_PMA_DEVAD, 3867 MDIO_PMA_DEVAD,
3891 MDIO_PMA_REG_GEN_CTRL, 3868 MDIO_PMA_REG_GEN_CTRL,
3892 0x008c); 3869 0x008c);
3893 3870
3894 bnx2x_cl45_write(bp, phy, 3871 bnx2x_cl45_write(bp, phy,
3895 MDIO_PMA_DEVAD, 3872 MDIO_PMA_DEVAD,
3896 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 3873 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3897 3874
3898 /* Reset internal microprocessor */ 3875 /* Reset internal microprocessor */
3899 bnx2x_cl45_write(bp, phy, 3876 bnx2x_cl45_write(bp, phy,
3900 MDIO_PMA_DEVAD, 3877 MDIO_PMA_DEVAD,
3901 MDIO_PMA_REG_GEN_CTRL, 3878 MDIO_PMA_REG_GEN_CTRL,
3902 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 3879 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
3903 3880
3904 /* Release srst bit */ 3881 /* Release srst bit */
3905 bnx2x_cl45_write(bp, phy, 3882 bnx2x_cl45_write(bp, phy,
3906 MDIO_PMA_DEVAD, 3883 MDIO_PMA_DEVAD,
3907 MDIO_PMA_REG_GEN_CTRL, 3884 MDIO_PMA_REG_GEN_CTRL,
3908 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 3885 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3909 3886
3910 /* Delay 100ms per the PHY specifications */ 3887 /* Delay 100ms per the PHY specifications */
3911 msleep(100); 3888 msleep(100);
@@ -3936,8 +3913,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3936 3913
3937 /* Clear ser_boot_ctl bit */ 3914 /* Clear ser_boot_ctl bit */
3938 bnx2x_cl45_write(bp, phy, 3915 bnx2x_cl45_write(bp, phy,
3939 MDIO_PMA_DEVAD, 3916 MDIO_PMA_DEVAD,
3940 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 3917 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3941 bnx2x_save_bcm_spirom_ver(bp, phy, port); 3918 bnx2x_save_bcm_spirom_ver(bp, phy, port);
3942 3919
3943 DP(NETIF_MSG_LINK, 3920 DP(NETIF_MSG_LINK,
@@ -3958,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
3958 3935
3959 /* Read 8073 HW revision*/ 3936 /* Read 8073 HW revision*/
3960 bnx2x_cl45_read(bp, phy, 3937 bnx2x_cl45_read(bp, phy,
3961 MDIO_PMA_DEVAD, 3938 MDIO_PMA_DEVAD,
3962 MDIO_PMA_REG_8073_CHIP_REV, &val); 3939 MDIO_PMA_REG_8073_CHIP_REV, &val);
3963 3940
3964 if (val != 1) { 3941 if (val != 1) {
3965 /* No need to workaround in 8073 A1 */ 3942 /* No need to workaround in 8073 A1 */
@@ -3967,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
3967 } 3944 }
3968 3945
3969 bnx2x_cl45_read(bp, phy, 3946 bnx2x_cl45_read(bp, phy,
3970 MDIO_PMA_DEVAD, 3947 MDIO_PMA_DEVAD,
3971 MDIO_PMA_REG_ROM_VER2, &val); 3948 MDIO_PMA_REG_ROM_VER2, &val);
3972 3949
3973 /* SNR should be applied only for version 0x102 */ 3950 /* SNR should be applied only for version 0x102 */
3974 if (val != 0x102) 3951 if (val != 0x102)
@@ -3982,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
3982 u16 val, cnt, cnt1 ; 3959 u16 val, cnt, cnt1 ;
3983 3960
3984 bnx2x_cl45_read(bp, phy, 3961 bnx2x_cl45_read(bp, phy,
3985 MDIO_PMA_DEVAD, 3962 MDIO_PMA_DEVAD,
3986 MDIO_PMA_REG_8073_CHIP_REV, &val); 3963 MDIO_PMA_REG_8073_CHIP_REV, &val);
3987 3964
3988 if (val > 0) { 3965 if (val > 0) {
3989 /* No need to workaround in 8073 A1 */ 3966 /* No need to workaround in 8073 A1 */
@@ -3991,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
3991 } 3968 }
3992 /* XAUI workaround in 8073 A0: */ 3969 /* XAUI workaround in 8073 A0: */
3993 3970
-	/* After loading the boot ROM and restarting Autoneg,
-	poll Dev1, Reg $C820: */
+	/*
+	 * After loading the boot ROM and restarting Autoneg, poll
+	 * Dev1, Reg $C820:
+	 */
3996 3975
3997 for (cnt = 0; cnt < 1000; cnt++) { 3976 for (cnt = 0; cnt < 1000; cnt++) {
3998 bnx2x_cl45_read(bp, phy, 3977 bnx2x_cl45_read(bp, phy,
3999 MDIO_PMA_DEVAD, 3978 MDIO_PMA_DEVAD,
4000 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 3979 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
4001 &val); 3980 &val);
-		/* If bit [14] = 0 or bit [13] = 0, continue on with
-		   system initialization (XAUI work-around not required,
-		   as these bits indicate 2.5G or 1G link up). */
+		/*
+		 * If bit [14] = 0 or bit [13] = 0, continue on with
+		 * system initialization (XAUI work-around not required, as
+		 * these bits indicate 2.5G or 1G link up).
+		 */
4005 if (!(val & (1<<14)) || !(val & (1<<13))) { 3986 if (!(val & (1<<14)) || !(val & (1<<13))) {
4006 DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); 3987 DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
4007 return 0; 3988 return 0;
4008 } else if (!(val & (1<<15))) { 3989 } else if (!(val & (1<<15))) {
-			DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
-			/* If bit 15 is 0, then poll Dev1, Reg $C841 until
-			   it's MSB (bit 15) goes to 1 (indicating that the
-			   XAUI workaround has completed),
-			   then continue on with system initialization.*/
+			DP(NETIF_MSG_LINK, "bit 15 went off\n");
+			/*
+			 * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+			 * MSB (bit 15) goes to 1 (indicating that the XAUI
+			 * workaround has completed), then continue on with
+			 * system initialization.
+			 */
4014 for (cnt1 = 0; cnt1 < 1000; cnt1++) { 3997 for (cnt1 = 0; cnt1 < 1000; cnt1++) {
4015 bnx2x_cl45_read(bp, phy, 3998 bnx2x_cl45_read(bp, phy,
4016 MDIO_PMA_DEVAD, 3999 MDIO_PMA_DEVAD,
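The XAUI work-around above is a bounded register poll; the following is a minimal standalone sketch of that pattern, with a hypothetical read callback standing in for the clause-45 MDIO access:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical register-read callback; the real driver uses bnx2x_cl45_read. */
typedef uint16_t (*reg_read_fn)(void *ctx, uint16_t reg);

/* Poll 'reg' until (value & mask) == want, or give up after max_tries reads,
 * mirroring the 1000-iteration loops in the 8073 XAUI work-around. */
static bool poll_reg(reg_read_fn rd, void *ctx, uint16_t reg,
		     uint16_t mask, uint16_t want, int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		if ((rd(ctx, reg) & mask) == want)
			return true;
	}
	return false;
}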
@@ -4093,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4093 gpio_port = params->port; 4076 gpio_port = params->port;
4094 /* Restore normal power mode*/ 4077 /* Restore normal power mode*/
4095 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4078 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4096 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 4079 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
4097 4080
4098 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 4081 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4099 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 4082 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
4100 4083
4101 /* enable LASI */ 4084 /* enable LASI */
4102 bnx2x_cl45_write(bp, phy, 4085 bnx2x_cl45_write(bp, phy,
@@ -4114,10 +4097,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4114 4097
4115 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 4098 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
4116 4099
4117 /**
4118 * If this is forced speed, set to KR or KX (all other are not
4119 * supported)
4120 */
4121 /* Swap polarity if required - Must be done only in non-1G mode */ 4100 /* Swap polarity if required - Must be done only in non-1G mode */
4122 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { 4101 if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
4123 /* Configure the 8073 to swap _P and _N of the KR lines */ 4102 /* Configure the 8073 to swap _P and _N of the KR lines */
@@ -4160,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4160 val = (1<<7); 4139 val = (1<<7);
4161 } else if (phy->req_line_speed == SPEED_2500) { 4140 } else if (phy->req_line_speed == SPEED_2500) {
4162 val = (1<<5); 4141 val = (1<<5);
-			/* Note that 2.5G works only
-			   when used with 1G advertisment */
+			/*
+			 * Note that 2.5G works only when used with 1G
+			 * advertisement
+			 */
4165 } else 4146 } else
4166 val = (1<<5); 4147 val = (1<<5);
4167 } else { 4148 } else {
@@ -4170,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4170 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 4151 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
4171 val |= (1<<7); 4152 val |= (1<<7);
4172 4153
-		/* Note that 2.5G works only when
-		   used with 1G advertisment */
+		/* Note that 2.5G works only when used with 1G advertisement */
4175 if (phy->speed_cap_mask & 4155 if (phy->speed_cap_mask &
4176 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | 4156 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4177 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 4157 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4211,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
4211 /* Add support for CL37 (passive mode) III */ 4191 /* Add support for CL37 (passive mode) III */
4212 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 4192 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
4213 4193
-	/* The SNR will improve about 2db by changing
-	   BW and FEE main tap. Rest commands are executed
-	   after link is up*/
+	/*
+	 * The SNR will improve about 2 dB by changing BW and FEE main
+	 * tap. The rest of the commands are executed after link is up.
+	 * Change FFE main cursor to 5 in EDC register.
+	 */
4217 if (bnx2x_8073_is_snr_needed(bp, phy)) 4199 if (bnx2x_8073_is_snr_needed(bp, phy))
4218 bnx2x_cl45_write(bp, phy, 4200 bnx2x_cl45_write(bp, phy,
4219 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, 4201 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4297,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4297 4279
4298 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); 4280 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
4299 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { 4281 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-		/* The SNR will improve about 2dbby
-		   changing the BW and FEE main tap.*/
-		/* The 1st write to change FFE main
-		   tap is set before restart AN */
-		/* Change PLL Bandwidth in EDC
-		   register */
+		/*
+		 * The SNR will improve about 2 dB by changing the BW and FEE
+		 * main tap. The 1st write to change FFE main tap is set before
+		 * restarting AN. Change PLL Bandwidth in EDC register.
+		 */
4306 bnx2x_cl45_write(bp, phy, 4287 bnx2x_cl45_write(bp, phy,
4307 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, 4288 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
4308 0x26BC); 4289 0x26BC);
@@ -4346,10 +4327,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
4346 bnx2x_cl45_read(bp, phy, 4327 bnx2x_cl45_read(bp, phy,
4347 MDIO_XS_DEVAD, 4328 MDIO_XS_DEVAD,
4348 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); 4329 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
-		/**
+		/*
4350 * Set bit 3 to invert Rx in 1G mode and clear this bit 4331 * Set bit 3 to invert Rx in 1G mode and clear this bit
4351 * when it`s in 10G mode. 4332 * when it`s in 10G mode.
4352 */ 4333 */
4353 if (vars->line_speed == SPEED_1000) { 4334 if (vars->line_speed == SPEED_1000) {
4354 DP(NETIF_MSG_LINK, "Swapping 1G polarity for" 4335 DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
4355 "the 8073\n"); 4336 "the 8073\n");
@@ -4381,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
4381 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", 4362 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
4382 gpio_port); 4363 gpio_port);
4383 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4364 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4384 MISC_REGISTERS_GPIO_OUTPUT_LOW, 4365 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4385 gpio_port); 4366 gpio_port);
4386} 4367}
4387 4368
4388/******************************************************************/ 4369/******************************************************************/
@@ -4396,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
4396 DP(NETIF_MSG_LINK, "init 8705\n"); 4377 DP(NETIF_MSG_LINK, "init 8705\n");
4397 /* Restore normal power mode*/ 4378 /* Restore normal power mode*/
4398 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4379 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4399 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 4380 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
4400 /* HW reset */ 4381 /* HW reset */
4401 bnx2x_ext_phy_hw_reset(bp, params->port); 4382 bnx2x_ext_phy_hw_reset(bp, params->port);
4402 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); 4383 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
4404 4385
4405 bnx2x_cl45_write(bp, phy, 4386 bnx2x_cl45_write(bp, phy,
4406 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); 4387 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4451,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
 /******************************************************************/
 /*			SFP+ module Section			  */
 /******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+	u8 gpio_port;
+	u32 swap_val, swap_override;
+	struct bnx2x *bp = params->bp;
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	return gpio_port ^ (swap_val && swap_override);
+}
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
 				      struct bnx2x_phy *phy,
-				      u8 port,
 				      u8 tx_en)
 {
 	u16 val;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 tx_en_mode;
 
-	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
-	   tx_en, port);
 	/* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_PHY_IDENTIFIER,
-			&val);
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				     dev_info.port_hw_config[port].sfp_ctrl)) &
+		PORT_HW_CFG_TX_LASER_MASK;
+	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+			   "mode = %x\n", tx_en, port, tx_en_mode);
+	switch (tx_en_mode) {
+	case PORT_HW_CFG_TX_LASER_MDIO:
 
-	if (tx_en)
-		val &= ~(1<<15);
-	else
-		val |= (1<<15);
-
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_PHY_IDENTIFIER,
-			 val);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val);
+
+		if (tx_en)
+			val &= ~(1<<15);
+		else
+			val |= (1<<15);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER,
+				 val);
+		break;
+	case PORT_HW_CFG_TX_LASER_GPIO0:
+	case PORT_HW_CFG_TX_LASER_GPIO1:
+	case PORT_HW_CFG_TX_LASER_GPIO2:
+	case PORT_HW_CFG_TX_LASER_GPIO3:
+	{
+		u16 gpio_pin;
+		u8 gpio_port, gpio_mode;
+		if (tx_en)
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+		else
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+		gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+		gpio_port = bnx2x_get_gpio_port(params);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+		break;
+	}
+	default:
+		DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+		break;
+	}
 }
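The rewritten helper above selects between MDIO and GPIO control of the SFP+ TX laser; a small standalone sketch of the pin/port arithmetic follows (the constant values are illustrative stand-ins for the PORT_HW_CFG_* and NIG swap definitions):

#include <stdint.h>
#include <stdio.h>

#define CFG_TX_LASER_GPIO0 0x1	/* illustrative; real value is in bnx2x_hsi.h */

/* GPIOn maps to pin n by subtraction, and the port index is flipped when
 * both the port-swap strap and its override are set, as in
 * bnx2x_get_gpio_port() above. */
static void pick_laser_gpio(uint32_t tx_en_mode, uint8_t port,
			    uint32_t swap_val, uint32_t swap_override,
			    uint16_t *pin, uint8_t *gpio_port)
{
	*pin = (uint16_t)(tx_en_mode - CFG_TX_LASER_GPIO0);
	*gpio_port = port ^ (swap_val && swap_override);
}

int main(void)
{
	uint16_t pin;
	uint8_t gp;

	pick_laser_gpio(CFG_TX_LASER_GPIO0 + 2 /* GPIO2 */, 1, 1, 1, &pin, &gp);
	printf("pin=%u port=%u\n", pin, gp);	/* pin=2 port=0 */
	return 0;
}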
4479 4504
4480static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4505static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4481 struct link_params *params, 4506 struct link_params *params,
4482 u16 addr, u8 byte_cnt, u8 *o_buf) 4507 u16 addr, u8 byte_cnt, u8 *o_buf)
4483{ 4508{
4484 struct bnx2x *bp = params->bp; 4509 struct bnx2x *bp = params->bp;
4485 u16 val = 0; 4510 u16 val = 0;
@@ -4492,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4492 /* Set the read command byte count */ 4517 /* Set the read command byte count */
4493 bnx2x_cl45_write(bp, phy, 4518 bnx2x_cl45_write(bp, phy,
4494 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 4519 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
4495 (byte_cnt | 0xa000)); 4520 (byte_cnt | 0xa000));
4496 4521
4497 /* Set the read command address */ 4522 /* Set the read command address */
4498 bnx2x_cl45_write(bp, phy, 4523 bnx2x_cl45_write(bp, phy,
4499 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 4524 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
4500 addr); 4525 addr);
4501 4526
4502 /* Activate read command */ 4527 /* Activate read command */
4503 bnx2x_cl45_write(bp, phy, 4528 bnx2x_cl45_write(bp, phy,
4504 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4529 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4505 0x2c0f); 4530 0x2c0f);
4506 4531
4507 /* Wait up to 500us for command complete status */ 4532 /* Wait up to 500us for command complete status */
4508 for (i = 0; i < 100; i++) { 4533 for (i = 0; i < 100; i++) {
4509 bnx2x_cl45_read(bp, phy, 4534 bnx2x_cl45_read(bp, phy,
4510 MDIO_PMA_DEVAD, 4535 MDIO_PMA_DEVAD,
4511 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4536 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4512 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4537 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4513 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4538 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
4514 break; 4539 break;
@@ -4526,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4526 /* Read the buffer */ 4551 /* Read the buffer */
4527 for (i = 0; i < byte_cnt; i++) { 4552 for (i = 0; i < byte_cnt; i++) {
4528 bnx2x_cl45_read(bp, phy, 4553 bnx2x_cl45_read(bp, phy,
4529 MDIO_PMA_DEVAD, 4554 MDIO_PMA_DEVAD,
4530 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); 4555 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
4531 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); 4556 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
4532 } 4557 }
4533 4558
4534 for (i = 0; i < 100; i++) { 4559 for (i = 0; i < 100; i++) {
4535 bnx2x_cl45_read(bp, phy, 4560 bnx2x_cl45_read(bp, phy,
4536 MDIO_PMA_DEVAD, 4561 MDIO_PMA_DEVAD,
4537 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4562 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4538 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4563 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4539 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4564 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
4540 return 0; 4565 return 0;
@@ -4545,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4545 4570
4546static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, 4571static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4547 struct link_params *params, 4572 struct link_params *params,
4548 u16 addr, u8 byte_cnt, u8 *o_buf) 4573 u16 addr, u8 byte_cnt, u8 *o_buf)
4549{ 4574{
4550 struct bnx2x *bp = params->bp; 4575 struct bnx2x *bp = params->bp;
4551 u16 val, i; 4576 u16 val, i;
@@ -4558,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4558 4583
4559 /* Need to read from 1.8000 to clear it */ 4584 /* Need to read from 1.8000 to clear it */
4560 bnx2x_cl45_read(bp, phy, 4585 bnx2x_cl45_read(bp, phy,
4561 MDIO_PMA_DEVAD, 4586 MDIO_PMA_DEVAD,
4562 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4587 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4563 &val); 4588 &val);
4564 4589
4565 /* Set the read command byte count */ 4590 /* Set the read command byte count */
4566 bnx2x_cl45_write(bp, phy, 4591 bnx2x_cl45_write(bp, phy,
4567 MDIO_PMA_DEVAD, 4592 MDIO_PMA_DEVAD,
4568 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 4593 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
4569 ((byte_cnt < 2) ? 2 : byte_cnt)); 4594 ((byte_cnt < 2) ? 2 : byte_cnt));
4570 4595
4571 /* Set the read command address */ 4596 /* Set the read command address */
4572 bnx2x_cl45_write(bp, phy, 4597 bnx2x_cl45_write(bp, phy,
4573 MDIO_PMA_DEVAD, 4598 MDIO_PMA_DEVAD,
4574 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 4599 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
4575 addr); 4600 addr);
4576 /* Set the destination address */ 4601 /* Set the destination address */
4577 bnx2x_cl45_write(bp, phy, 4602 bnx2x_cl45_write(bp, phy,
4578 MDIO_PMA_DEVAD, 4603 MDIO_PMA_DEVAD,
4579 0x8004, 4604 0x8004,
4580 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); 4605 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
4581 4606
4582 /* Activate read command */ 4607 /* Activate read command */
4583 bnx2x_cl45_write(bp, phy, 4608 bnx2x_cl45_write(bp, phy,
4584 MDIO_PMA_DEVAD, 4609 MDIO_PMA_DEVAD,
4585 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 4610 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
4586 0x8002); 4611 0x8002);
-	/* Wait appropriate time for two-wire command to finish before
-	   polling the status register */
+	/*
+	 * Wait appropriate time for two-wire command to finish before
+	 * polling the status register
+	 */
4589 msleep(1); 4616 msleep(1);
4590 4617
4591 /* Wait up to 500us for command complete status */ 4618 /* Wait up to 500us for command complete status */
4592 for (i = 0; i < 100; i++) { 4619 for (i = 0; i < 100; i++) {
4593 bnx2x_cl45_read(bp, phy, 4620 bnx2x_cl45_read(bp, phy,
4594 MDIO_PMA_DEVAD, 4621 MDIO_PMA_DEVAD,
4595 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4622 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4596 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4623 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4597 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) 4624 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
4598 break; 4625 break;
@@ -4604,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4604 DP(NETIF_MSG_LINK, 4631 DP(NETIF_MSG_LINK,
4605 "Got bad status 0x%x when reading from SFP+ EEPROM\n", 4632 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
4606 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); 4633 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
-		return -EINVAL;
+		return -EFAULT;
4608 } 4635 }
4609 4636
4610 /* Read the buffer */ 4637 /* Read the buffer */
4611 for (i = 0; i < byte_cnt; i++) { 4638 for (i = 0; i < byte_cnt; i++) {
4612 bnx2x_cl45_read(bp, phy, 4639 bnx2x_cl45_read(bp, phy,
4613 MDIO_PMA_DEVAD, 4640 MDIO_PMA_DEVAD,
4614 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); 4641 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
4615 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); 4642 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
4616 } 4643 }
4617 4644
4618 for (i = 0; i < 100; i++) { 4645 for (i = 0; i < 100; i++) {
4619 bnx2x_cl45_read(bp, phy, 4646 bnx2x_cl45_read(bp, phy,
4620 MDIO_PMA_DEVAD, 4647 MDIO_PMA_DEVAD,
4621 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4648 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
4622 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4649 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
4623 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4650 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
4624 return 0; 4651 return 0;
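The 8726/8727 hunks above drive the same two-wire command sequence; below is a compact standalone sketch of that flow, with register numbers, masks and the rd/wr callbacks as assumed stand-ins rather than the real MDIO_PMA_REG_* values:

#include <stdint.h>

#define TW_CTRL		0x8000	/* illustrative register numbers */
#define TW_BYTE_CNT	0x8004
#define TW_MEM_ADDR	0x8008
#define TW_DATA_BUF	0x8010
#define TW_CMD_START	0x8002	/* activation value, as in the hunk above */
#define TW_STATUS_MASK	0x000c
#define TW_COMPLETE	0x0004
#define TW_IDLE		0x0000

struct tw_ops {
	uint16_t (*rd)(void *ctx, uint16_t reg);
	void (*wr)(void *ctx, uint16_t reg, uint16_t val);
	void *ctx;
};

static int sfp_eeprom_read(const struct tw_ops *io, uint16_t addr,
			   uint8_t byte_cnt, uint8_t *buf)
{
	int i;

	io->wr(io->ctx, TW_BYTE_CNT, byte_cnt < 2 ? 2 : byte_cnt);
	io->wr(io->ctx, TW_MEM_ADDR, addr);
	io->wr(io->ctx, TW_CTRL, TW_CMD_START);		/* start the transfer */

	for (i = 0; i < 100; i++)			/* up to ~500us */
		if ((io->rd(io->ctx, TW_CTRL) & TW_STATUS_MASK) == TW_COMPLETE)
			break;
	if (i == 100)
		return -1;				/* command timed out */

	for (i = 0; i < byte_cnt; i++)			/* copy the data words */
		buf[i] = (uint8_t)io->rd(io->ctx, TW_DATA_BUF + i);

	for (i = 0; i < 100; i++)			/* wait for IDLE */
		if ((io->rd(io->ctx, TW_CTRL) & TW_STATUS_MASK) == TW_IDLE)
			return 0;
	return -1;
}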
@@ -4628,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4628 return -EINVAL; 4655 return -EINVAL;
4629} 4656}
4630 4657
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4632 struct link_params *params, u16 addr, 4659 struct link_params *params, u16 addr,
4633 u8 byte_cnt, u8 *o_buf) 4660 u8 byte_cnt, u8 *o_buf)
4634{ 4661{
4635 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 4662 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
4636 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, 4663 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
4637 byte_cnt, o_buf); 4664 byte_cnt, o_buf);
4638 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 4665 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
4639 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, 4666 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
4640 byte_cnt, o_buf); 4667 byte_cnt, o_buf);
4641 return -EINVAL; 4668 return -EINVAL;
4642} 4669}
4643 4670
4644static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, 4671static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4645 struct link_params *params, 4672 struct link_params *params,
4646 u16 *edc_mode) 4673 u16 *edc_mode)
4647{ 4674{
4648 struct bnx2x *bp = params->bp; 4675 struct bnx2x *bp = params->bp;
4649 u8 val, check_limiting_mode = 0; 4676 u8 val, check_limiting_mode = 0;
@@ -4664,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4664 { 4691 {
4665 u8 copper_module_type; 4692 u8 copper_module_type;
4666 4693
-		/* Check if its active cable( includes SFP+ module)
-		   of passive cable*/
+		/*
+		 * Check if it is an active cable (includes SFP+ module)
+		 * or a passive cable
+		 */
4669 if (bnx2x_read_sfp_module_eeprom(phy, 4698 if (bnx2x_read_sfp_module_eeprom(phy,
4670 params, 4699 params,
4671 SFP_EEPROM_FC_TX_TECH_ADDR, 4700 SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4724,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
4724 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 4753 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
4725 return 0; 4754 return 0;
4726} 4755}
-/* This function read the relevant field from the module ( SFP+ ),
-   and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+) and verifies
+ * it is compliant with this board
+ */
4729static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, 4760static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4730 struct link_params *params) 4761 struct link_params *params)
4731{ 4762{
@@ -4774,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4774 /* format the warning message */ 4805 /* format the warning message */
4775 if (bnx2x_read_sfp_module_eeprom(phy, 4806 if (bnx2x_read_sfp_module_eeprom(phy,
4776 params, 4807 params,
4777 SFP_EEPROM_VENDOR_NAME_ADDR, 4808 SFP_EEPROM_VENDOR_NAME_ADDR,
4778 SFP_EEPROM_VENDOR_NAME_SIZE, 4809 SFP_EEPROM_VENDOR_NAME_SIZE,
4779 (u8 *)vendor_name)) 4810 (u8 *)vendor_name))
4780 vendor_name[0] = '\0'; 4811 vendor_name[0] = '\0';
4781 else 4812 else
4782 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; 4813 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
4783 if (bnx2x_read_sfp_module_eeprom(phy, 4814 if (bnx2x_read_sfp_module_eeprom(phy,
4784 params, 4815 params,
4785 SFP_EEPROM_PART_NO_ADDR, 4816 SFP_EEPROM_PART_NO_ADDR,
4786 SFP_EEPROM_PART_NO_SIZE, 4817 SFP_EEPROM_PART_NO_SIZE,
4787 (u8 *)vendor_pn)) 4818 (u8 *)vendor_pn))
4788 vendor_pn[0] = '\0'; 4819 vendor_pn[0] = '\0';
4789 else 4820 else
4790 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; 4821 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
4791 4822
-	netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
-		    " Port %d from %s part number %s\n",
-		    params->port, vendor_name, vendor_pn);
+	netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+		   " Port %d from %s part number %s\n",
+		   params->port, vendor_name, vendor_pn);
4795 phy->flags |= FLAGS_SFP_NOT_APPROVED; 4826 phy->flags |= FLAGS_SFP_NOT_APPROVED;
4796 return -EINVAL; 4827 return -EINVAL;
4797} 4828}
@@ -4803,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
4803 u8 val; 4834 u8 val;
4804 struct bnx2x *bp = params->bp; 4835 struct bnx2x *bp = params->bp;
4805 u16 timeout; 4836 u16 timeout;
-	/* Initialization time after hot-plug may take up to 300ms for some
-	   phys type ( e.g. JDSU ) */
+	/*
+	 * Initialization time after hot-plug may take up to 300ms for
+	 * some phy types (e.g. JDSU)
+	 */
+
4808 for (timeout = 0; timeout < 60; timeout++) { 4842 for (timeout = 0; timeout < 60; timeout++) {
4809 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) 4843 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
4810 == 0) { 4844 == 0) {
@@ -4823,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
4823 /* Make sure GPIOs are not using for LED mode */ 4857 /* Make sure GPIOs are not using for LED mode */
4824 u16 val; 4858 u16 val;
4825 /* 4859 /*
-	 * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+	 * In the GPIO register, bit 4 is used to determine if the GPIOs are
4827 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 4861 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
4828 * output 4862 * output
4829 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 4863 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
4830 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 4864 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
4831 * where the 1st bit is the over-current(only input), and 2nd bit is 4865 * where the 1st bit is the over-current(only input), and 2nd bit is
4832 * for power( only output ) 4866 * for power( only output )
-	 */
-
-	/*
+	 *
 	 * In case of NOC feature is disabled and power is up, set GPIO control
4837 * as input to enable listening of over-current indication 4869 * as input to enable listening of over-current indication
4838 */ 4870 */
@@ -4861,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4861 u16 cur_limiting_mode; 4893 u16 cur_limiting_mode;
4862 4894
4863 bnx2x_cl45_read(bp, phy, 4895 bnx2x_cl45_read(bp, phy,
4864 MDIO_PMA_DEVAD, 4896 MDIO_PMA_DEVAD,
4865 MDIO_PMA_REG_ROM_VER2, 4897 MDIO_PMA_REG_ROM_VER2,
4866 &cur_limiting_mode); 4898 &cur_limiting_mode);
4867 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", 4899 DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
4868 cur_limiting_mode); 4900 cur_limiting_mode);
4869 4901
4870 if (edc_mode == EDC_MODE_LIMITING) { 4902 if (edc_mode == EDC_MODE_LIMITING) {
4871 DP(NETIF_MSG_LINK, 4903 DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
4872 "Setting LIMITING MODE\n");
4873 bnx2x_cl45_write(bp, phy, 4904 bnx2x_cl45_write(bp, phy,
4874 MDIO_PMA_DEVAD, 4905 MDIO_PMA_DEVAD,
4875 MDIO_PMA_REG_ROM_VER2, 4906 MDIO_PMA_REG_ROM_VER2,
@@ -4878,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4878 4909
4879 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 4910 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
4880 4911
-		/* Changing to LRM mode takes quite few seconds.
-		   So do it only if current mode is limiting
-		   ( default is LRM )*/
+		/*
+		 * Changing to LRM mode takes quite a few seconds. So do it only
+		 * if the current mode is limiting (default is LRM)
+		 */
4884 if (cur_limiting_mode != EDC_MODE_LIMITING) 4916 if (cur_limiting_mode != EDC_MODE_LIMITING)
4885 return 0; 4917 return 0;
4886 4918
4887 bnx2x_cl45_write(bp, phy, 4919 bnx2x_cl45_write(bp, phy,
4888 MDIO_PMA_DEVAD, 4920 MDIO_PMA_DEVAD,
4889 MDIO_PMA_REG_LRM_MODE, 4921 MDIO_PMA_REG_LRM_MODE,
4890 0); 4922 0);
4891 bnx2x_cl45_write(bp, phy, 4923 bnx2x_cl45_write(bp, phy,
4892 MDIO_PMA_DEVAD, 4924 MDIO_PMA_DEVAD,
4893 MDIO_PMA_REG_ROM_VER2, 4925 MDIO_PMA_REG_ROM_VER2,
4894 0x128); 4926 0x128);
4895 bnx2x_cl45_write(bp, phy, 4927 bnx2x_cl45_write(bp, phy,
4896 MDIO_PMA_DEVAD, 4928 MDIO_PMA_DEVAD,
4897 MDIO_PMA_REG_MISC_CTRL0, 4929 MDIO_PMA_REG_MISC_CTRL0,
4898 0x4008); 4930 0x4008);
4899 bnx2x_cl45_write(bp, phy, 4931 bnx2x_cl45_write(bp, phy,
4900 MDIO_PMA_DEVAD, 4932 MDIO_PMA_DEVAD,
4901 MDIO_PMA_REG_LRM_MODE, 4933 MDIO_PMA_REG_LRM_MODE,
4902 0xaaaa); 4934 0xaaaa);
4903 } 4935 }
4904 return 0; 4936 return 0;
4905} 4937}
4906 4938
4907static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp, 4939static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
4908 struct bnx2x_phy *phy, 4940 struct bnx2x_phy *phy,
4909 u16 edc_mode) 4941 u16 edc_mode)
4910{ 4942{
4911 u16 phy_identifier; 4943 u16 phy_identifier;
4912 u16 rom_ver2_val; 4944 u16 rom_ver2_val;
4913 bnx2x_cl45_read(bp, phy, 4945 bnx2x_cl45_read(bp, phy,
4914 MDIO_PMA_DEVAD, 4946 MDIO_PMA_DEVAD,
4915 MDIO_PMA_REG_PHY_IDENTIFIER, 4947 MDIO_PMA_REG_PHY_IDENTIFIER,
4916 &phy_identifier); 4948 &phy_identifier);
4917 4949
4918 bnx2x_cl45_write(bp, phy, 4950 bnx2x_cl45_write(bp, phy,
4919 MDIO_PMA_DEVAD, 4951 MDIO_PMA_DEVAD,
4920 MDIO_PMA_REG_PHY_IDENTIFIER, 4952 MDIO_PMA_REG_PHY_IDENTIFIER,
4921 (phy_identifier & ~(1<<9))); 4953 (phy_identifier & ~(1<<9)));
4922 4954
4923 bnx2x_cl45_read(bp, phy, 4955 bnx2x_cl45_read(bp, phy,
4924 MDIO_PMA_DEVAD, 4956 MDIO_PMA_DEVAD,
4925 MDIO_PMA_REG_ROM_VER2, 4957 MDIO_PMA_REG_ROM_VER2,
4926 &rom_ver2_val); 4958 &rom_ver2_val);
4927 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ 4959 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
4928 bnx2x_cl45_write(bp, phy, 4960 bnx2x_cl45_write(bp, phy,
4929 MDIO_PMA_DEVAD, 4961 MDIO_PMA_DEVAD,
4930 MDIO_PMA_REG_ROM_VER2, 4962 MDIO_PMA_REG_ROM_VER2,
4931 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); 4963 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
4932 4964
4933 bnx2x_cl45_write(bp, phy, 4965 bnx2x_cl45_write(bp, phy,
4934 MDIO_PMA_DEVAD, 4966 MDIO_PMA_DEVAD,
4935 MDIO_PMA_REG_PHY_IDENTIFIER, 4967 MDIO_PMA_REG_PHY_IDENTIFIER,
4936 (phy_identifier | (1<<9))); 4968 (phy_identifier | (1<<9)));
4937 4969
4938 return 0; 4970 return 0;
4939} 4971}
@@ -4946,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
4946 4978
4947 switch (action) { 4979 switch (action) {
4948 case DISABLE_TX: 4980 case DISABLE_TX:
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
 		break;
 	case ENABLE_TX:
 		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+			bnx2x_sfp_set_transmitter(params, phy, 1);
4954 break; 4986 break;
4955 default: 4987 default:
4956 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", 4988 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4959,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
4959 } 4991 }
4960} 4992}
4961 4993
4994static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
4995 u8 gpio_mode)
4996{
4997 struct bnx2x *bp = params->bp;
4998
4999 u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
5000 offsetof(struct shmem_region,
5001 dev_info.port_hw_config[params->port].sfp_ctrl)) &
5002 PORT_HW_CFG_FAULT_MODULE_LED_MASK;
5003 switch (fault_led_gpio) {
5004 case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
5005 return;
5006 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
5007 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
5008 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
5009 case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
5010 {
5011 u8 gpio_port = bnx2x_get_gpio_port(params);
5012 u16 gpio_pin = fault_led_gpio -
5013 PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
5014 DP(NETIF_MSG_LINK, "Set fault module-detected led "
5015 "pin %x port %x mode %x\n",
5016 gpio_pin, gpio_port, gpio_mode);
5017 bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
5018 }
5019 break;
5020 default:
5021 DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
5022 fault_led_gpio);
5023 }
5024}
5025
4962static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, 5026static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4963 struct link_params *params) 5027 struct link_params *params)
4964{ 5028{
@@ -4976,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4976 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { 5040 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
4977 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 5041 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
4978 return -EINVAL; 5042 return -EINVAL;
-	} else if (bnx2x_verify_sfp_module(phy, params) !=
-		   0) {
+	} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
4981 /* check SFP+ module compatibility */ 5044 /* check SFP+ module compatibility */
4982 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 5045 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
4983 rc = -EINVAL; 5046 rc = -EINVAL;
4984 /* Turn on fault module-detected led */ 5047 /* Turn on fault module-detected led */
4985 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 5048 bnx2x_set_sfp_module_fault_led(params,
4986 MISC_REGISTERS_GPIO_HIGH, 5049 MISC_REGISTERS_GPIO_HIGH);
4987 params->port); 5050
4988 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && 5051 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
4989 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5052 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4990 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { 5053 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4995,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4995 } 5058 }
4996 } else { 5059 } else {
4997 /* Turn off fault module-detected led */ 5060 /* Turn off fault module-detected led */
-		DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			       MISC_REGISTERS_GPIO_LOW,
-			       params->port);
+		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
5002 } 5062 }
5003 5063
5004 /* power up the SFP module */ 5064 /* power up the SFP module */
5005 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 5065 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
5006 bnx2x_8727_power_module(bp, phy, 1); 5066 bnx2x_8727_power_module(bp, phy, 1);
5007 5067
-	/* Check and set limiting mode / LRM mode on 8726.
-	   On 8727 it is done automatically */
+	/*
+	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
+	 * is done automatically
+	 */
5010 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 5072 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
5011 bnx2x_8726_set_limiting_mode(bp, phy, edc_mode); 5073 bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
5012 else 5074 else
@@ -5018,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
5018 if (rc == 0 || 5080 if (rc == 0 ||
5019 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != 5081 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
5020 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5082 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+		bnx2x_sfp_set_transmitter(params, phy, 1);
 	else
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
5024 5086
5025 return rc; 5087 return rc;
5026} 5088}
@@ -5033,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
5033 u8 port = params->port; 5095 u8 port = params->port;
5034 5096
5035 /* Set valid module led off */ 5097 /* Set valid module led off */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-		       MISC_REGISTERS_GPIO_HIGH,
-		       params->port);
+	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 
-	/* Get current gpio val refelecting module plugged in / out*/
+	/* Get current gpio val reflecting module plugged in / out*/
5041 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); 5101 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
5042 5102
5043 /* Call the handling function in case module is detected */ 5103 /* Call the handling function in case module is detected */
@@ -5053,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
5053 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 5113 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
5054 } else { 5114 } else {
5055 u32 val = REG_RD(bp, params->shmem_base + 5115 u32 val = REG_RD(bp, params->shmem_base +
5056 offsetof(struct shmem_region, dev_info. 5116 offsetof(struct shmem_region, dev_info.
5057 port_feature_config[params->port]. 5117 port_feature_config[params->port].
5058 config)); 5118 config));
5059 5119
5060 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, 5120 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
5061 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 5121 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
5062 port); 5122 port);
-		/* Module was plugged out. */
-		/* Disable transmit for this module */
+		/*
+		 * Module was plugged out.
+		 * Disable transmit for this module
+		 */
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+			bnx2x_sfp_set_transmitter(params, phy, 0);
5068 } 5130 }
5069} 5131}
5070 5132
@@ -5100,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
5100 5162
5101 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" 5163 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
5102 " link_status 0x%x\n", rx_sd, pcs_status, val2); 5164 " link_status 0x%x\n", rx_sd, pcs_status, val2);
-	/* link is up if both bit 0 of pmd_rx_sd and
-	 * bit 0 of pcs_status are set, or if the autoneg bit
-	 * 1 is set
-	 */
+	/*
+	 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	 * are set, or if the autoneg bit 1 is set
+	 */
5107 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); 5169 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
5108 if (link_up) { 5170 if (link_up) {
@@ -5123,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
5123 struct link_params *params, 5185 struct link_params *params,
5124 struct link_vars *vars) 5186 struct link_vars *vars)
5125{ 5187{
-	u16 cnt, val;
+	u32 tx_en_mode;
+	u16 cnt, val, tmp1;
5127 struct bnx2x *bp = params->bp; 5190 struct bnx2x *bp = params->bp;
5128 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 5191 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
5129 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 5192 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5130 /* HW reset */ 5193 /* HW reset */
5131 bnx2x_ext_phy_hw_reset(bp, params->port); 5194 bnx2x_ext_phy_hw_reset(bp, params->port);
5132 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); 5195 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
5134 5197
5135 /* Wait until fw is loaded */ 5198 /* Wait until fw is loaded */
5136 for (cnt = 0; cnt < 100; cnt++) { 5199 for (cnt = 0; cnt < 100; cnt++) {
@@ -5197,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
5197 0x0004); 5260 0x0004);
5198 } 5261 }
5199 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 5262 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
5263
5264 /*
5265 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
5266 * power mode, if TX Laser is disabled
5267 */
5268
5269 tx_en_mode = REG_RD(bp, params->shmem_base +
5270 offsetof(struct shmem_region,
5271 dev_info.port_hw_config[params->port].sfp_ctrl))
5272 & PORT_HW_CFG_TX_LASER_MASK;
5273
5274 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
5275 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
5276 bnx2x_cl45_read(bp, phy,
5277 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
5278 tmp1 |= 0x1;
5279 bnx2x_cl45_write(bp, phy,
5280 MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
5281 }
5282
5200 return 0; 5283 return 0;
5201} 5284}
5202 5285
@@ -5231,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
5231 5314
5232 /* Set soft reset */ 5315 /* Set soft reset */
5233 bnx2x_cl45_write(bp, phy, 5316 bnx2x_cl45_write(bp, phy,
5234 MDIO_PMA_DEVAD, 5317 MDIO_PMA_DEVAD,
5235 MDIO_PMA_REG_GEN_CTRL, 5318 MDIO_PMA_REG_GEN_CTRL,
5236 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 5319 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
5237 5320
5238 bnx2x_cl45_write(bp, phy, 5321 bnx2x_cl45_write(bp, phy,
5239 MDIO_PMA_DEVAD, 5322 MDIO_PMA_DEVAD,
5240 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 5323 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
5241 5324
5242 bnx2x_cl45_write(bp, phy, 5325 bnx2x_cl45_write(bp, phy,
5243 MDIO_PMA_DEVAD, 5326 MDIO_PMA_DEVAD,
5244 MDIO_PMA_REG_GEN_CTRL, 5327 MDIO_PMA_REG_GEN_CTRL,
5245 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 5328 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
5246 5329
5247 /* wait for 150ms for microcode load */ 5330 /* wait for 150ms for microcode load */
5248 msleep(150); 5331 msleep(150);
5249 5332
5250 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ 5333 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
5251 bnx2x_cl45_write(bp, phy, 5334 bnx2x_cl45_write(bp, phy,
5252 MDIO_PMA_DEVAD, 5335 MDIO_PMA_DEVAD,
5253 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 5336 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
5254 5337
5255 msleep(200); 5338 msleep(200);
5256 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 5339 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5285,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
5285 u32 val; 5368 u32 val;
5286 u32 swap_val, swap_override, aeu_gpio_mask, offset; 5369 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5287 DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); 5370 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
5288 /* Restore normal power mode*/
5289 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
5290 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5291
5292 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
5293 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
5294 5371
5295 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 5372 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
5297 5374
5298 bnx2x_8726_external_rom_boot(phy, params); 5375 bnx2x_8726_external_rom_boot(phy, params);
5299 5376
-	/* Need to call module detected on initialization since
-	   the module detection triggered by actual module
-	   insertion might occur before driver is loaded, and when
-	   driver is loaded, it reset all registers, including the
-	   transmitter */
+	/*
+	 * Need to call module detected on initialization since the module
+	 * detection triggered by actual module insertion might occur before
+	 * the driver is loaded; when the driver is loaded, it resets all
+	 * registers, including the transmitter
+	 */
5305 bnx2x_sfp_module_detection(phy, params); 5383 bnx2x_sfp_module_detection(phy, params);
5306 5384
5307 if (phy->req_line_speed == SPEED_1000) { 5385 if (phy->req_line_speed == SPEED_1000) {
@@ -5334,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
5334 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 5412 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
5335 bnx2x_cl45_write(bp, phy, 5413 bnx2x_cl45_write(bp, phy,
5336 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); 5414 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-		/* Enable RX-ALARM control to receive
-		   interrupt for 1G speed change */
+		/*
+		 * Enable RX-ALARM control to receive interrupt for 1G speed
+		 * change
+		 */
5339 bnx2x_cl45_write(bp, phy, 5419 bnx2x_cl45_write(bp, phy,
5340 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4); 5420 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
5341 bnx2x_cl45_write(bp, phy, 5421 bnx2x_cl45_write(bp, phy,
@@ -5367,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
5367 5447
5368 /* Set GPIO3 to trigger SFP+ module insertion/removal */ 5448 /* Set GPIO3 to trigger SFP+ module insertion/removal */
5369 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5449 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5370 MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); 5450 MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
5371 5451
5372 /* The GPIO should be swapped if the swap register is set and active */ 5452 /* The GPIO should be swapped if the swap register is set and active */
5373 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 5453 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5458,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
5458 struct link_params *params) { 5538 struct link_params *params) {
5459 u32 swap_val, swap_override; 5539 u32 swap_val, swap_override;
5460 u8 port; 5540 u8 port;
-	/**
+	/*
5462 * The PHY reset is controlled by GPIO 1. Fake the port number 5542 * The PHY reset is controlled by GPIO 1. Fake the port number
5463 * to cancel the swap done in set_gpio() 5543 * to cancel the swap done in set_gpio()
5464 */ 5544 */
@@ -5467,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
5467 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 5547 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5468 port = (swap_val && swap_override) ^ 1; 5548 port = (swap_val && swap_override) ^ 1;
5469 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 5549 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
5470 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 5550 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
5471} 5551}
5472 5552
5473static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, 5553static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5474 struct link_params *params, 5554 struct link_params *params,
5475 struct link_vars *vars) 5555 struct link_vars *vars)
5476{ 5556{
-	u16 tmp1, val, mod_abs;
+	u32 tx_en_mode;
+	u16 tmp1, val, mod_abs, tmp2;
5478 u16 rx_alarm_ctrl_val; 5559 u16 rx_alarm_ctrl_val;
5479 u16 lasi_ctrl_val; 5560 u16 lasi_ctrl_val;
5480 struct bnx2x *bp = params->bp; 5561 struct bnx2x *bp = params->bp;
5481 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ 5562 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
5482 5563
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
5484 rx_alarm_ctrl_val = (1<<2) | (1<<5) ; 5565 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
5485 lasi_ctrl_val = 0x0004; 5566 lasi_ctrl_val = 0x0004;
5486 5567
@@ -5493,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5493 bnx2x_cl45_write(bp, phy, 5574 bnx2x_cl45_write(bp, phy,
5494 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val); 5575 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
5495 5576
-	/* Initially configure MOD_ABS to interrupt when
-	   module is presence( bit 8) */
+	/*
+	 * Initially configure MOD_ABS to interrupt when module is
+	 * present (bit 8)
+	 */
5498 bnx2x_cl45_read(bp, phy, 5581 bnx2x_cl45_read(bp, phy,
5499 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 5582 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-	/* Set EDC off by setting OPTXLOS signal input to low
-	   (bit 9).
-	   When the EDC is off it locks onto a reference clock and
-	   avoids becoming 'lost'.*/
+	/*
+	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	 * When the EDC is off it locks onto a reference clock and avoids
+	 * becoming 'lost'
+	 */
5504 mod_abs &= ~(1<<8); 5588 mod_abs &= ~(1<<8);
5505 if (!(phy->flags & FLAGS_NOC)) 5589 if (!(phy->flags & FLAGS_NOC))
5506 mod_abs &= ~(1<<9); 5590 mod_abs &= ~(1<<9);
@@ -5515,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5515 if (phy->flags & FLAGS_NOC) 5599 if (phy->flags & FLAGS_NOC)
5516 val |= (3<<5); 5600 val |= (3<<5);
5517 5601
-	/**
+	/*
5519 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 5603 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
5520 * status which reflect SFP+ module over-current 5604 * status which reflect SFP+ module over-current
5521 */ 5605 */
@@ -5542,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5542 bnx2x_cl45_read(bp, phy, 5626 bnx2x_cl45_read(bp, phy,
5543 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); 5627 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
5544 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); 5628 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-	/**
+	/*
5546 * Power down the XAUI until link is up in case of dual-media 5630 * Power down the XAUI until link is up in case of dual-media
5547 * and 1G 5631 * and 1G
5548 */ 5632 */
@@ -5568,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5568 bnx2x_cl45_write(bp, phy, 5652 bnx2x_cl45_write(bp, phy,
5569 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); 5653 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
5570 } else { 5654 } else {
-		/**
+		/*
5572 * Since the 8727 has only single reset pin, need to set the 10G 5656 * Since the 8727 has only single reset pin, need to set the 10G
5573 * registers although it is default 5657 * registers although it is default
5574 */ 5658 */
@@ -5584,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5584 0x0008); 5668 0x0008);
5585 } 5669 }
5586 5670
-	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	/*
+	 * Set 2-wire transfer rate of SFP+ module EEPROM
5588 * to 100Khz since some DACs(direct attached cables) do 5673 * to 100Khz since some DACs(direct attached cables) do
5589 * not work at 400Khz. 5674 * not work at 400Khz.
5590 */ 5675 */
@@ -5607,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
5607 phy->tx_preemphasis[1]); 5692 phy->tx_preemphasis[1]);
5608 } 5693 }
5609 5694
5695 /*
5696 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
5697 * power mode, if TX Laser is disabled
5698 */
5699 tx_en_mode = REG_RD(bp, params->shmem_base +
5700 offsetof(struct shmem_region,
5701 dev_info.port_hw_config[params->port].sfp_ctrl))
5702 & PORT_HW_CFG_TX_LASER_MASK;
5703
5704 if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
5705
5706 DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
5707 bnx2x_cl45_read(bp, phy,
5708 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
5709 tmp2 |= 0x1000;
5710 tmp2 &= 0xFFEF;
5711 bnx2x_cl45_write(bp, phy,
5712 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
5713 }
5714
5610 return 0; 5715 return 0;
5611} 5716}
5612 5717
@@ -5620,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5620 port_feature_config[params->port]. 5725 port_feature_config[params->port].
5621 config)); 5726 config));
5622 bnx2x_cl45_read(bp, phy, 5727 bnx2x_cl45_read(bp, phy,
5623 MDIO_PMA_DEVAD, 5728 MDIO_PMA_DEVAD,
5624 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 5729 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
5625 if (mod_abs & (1<<8)) { 5730 if (mod_abs & (1<<8)) {
5626 5731
5627 /* Module is absent */ 5732 /* Module is absent */
5628 DP(NETIF_MSG_LINK, "MOD_ABS indication " 5733 DP(NETIF_MSG_LINK, "MOD_ABS indication "
5629 "show module is absent\n"); 5734 "show module is absent\n");
5630 5735
-		/* 1. Set mod_abs to detect next module
-		      presence event
-		   2. Set EDC off by setting OPTXLOS signal input to low
-		      (bit 9).
-		      When the EDC is off it locks onto a reference clock and
-		      avoids becoming 'lost'.*/
+		/*
+		 * 1. Set mod_abs to detect next module
+		 *    presence event
+		 * 2. Set EDC off by setting OPTXLOS signal input to low
+		 *    (bit 9).
+		 *    When the EDC is off it locks onto a reference clock and
+		 *    avoids becoming 'lost'.
+		 */
5637 mod_abs &= ~(1<<8); 5744 mod_abs &= ~(1<<8);
5638 if (!(phy->flags & FLAGS_NOC)) 5745 if (!(phy->flags & FLAGS_NOC))
5639 mod_abs &= ~(1<<9); 5746 mod_abs &= ~(1<<9);
5640 bnx2x_cl45_write(bp, phy, 5747 bnx2x_cl45_write(bp, phy,
5641 MDIO_PMA_DEVAD, 5748 MDIO_PMA_DEVAD,
5642 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 5749 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
5643 5750
5644 /* Clear RX alarm since it stays up as long as 5751 /*
5645 the mod_abs wasn't changed */ 5752 * Clear RX alarm since it stays up as long as
5753 * the mod_abs wasn't changed
5754 */
5646 bnx2x_cl45_read(bp, phy, 5755 bnx2x_cl45_read(bp, phy,
5647 MDIO_PMA_DEVAD, 5756 MDIO_PMA_DEVAD,
5648 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); 5757 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
5649 5758
5650 } else { 5759 } else {
5651 /* Module is present */ 5760 /* Module is present */
5652 DP(NETIF_MSG_LINK, "MOD_ABS indication " 5761 DP(NETIF_MSG_LINK, "MOD_ABS indication "
5653 "show module is present\n"); 5762 "show module is present\n");
5654 /* First thing, disable transmitter, 5763 /*
5655 and if the module is ok, the 5764 * First disable transmitter, and if the module is ok, the
5656 module_detection will enable it*/ 5765 * module_detection will enable it
5657 5766 * 1. Set mod_abs to detect next module absent event ( bit 8)
5658 /* 1. Set mod_abs to detect next module 5767 * 2. Restore the default polarity of the OPRXLOS signal and
5659 absent event ( bit 8) 5768 * this signal will then correctly indicate the presence or
5660 2. Restore the default polarity of the OPRXLOS signal and 5769 * absence of the Rx signal. (bit 9)
5661 this signal will then correctly indicate the presence or 5770 */
5662 absence of the Rx signal. (bit 9) */
5663 mod_abs |= (1<<8); 5771 mod_abs |= (1<<8);
5664 if (!(phy->flags & FLAGS_NOC)) 5772 if (!(phy->flags & FLAGS_NOC))
5665 mod_abs |= (1<<9); 5773 mod_abs |= (1<<9);
@@ -5667,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5667 MDIO_PMA_DEVAD, 5775 MDIO_PMA_DEVAD,
5668 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 5776 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
5669 5777
5670 /* Clear RX alarm since it stays up as long as 5778 /*
5671 the mod_abs wasn't changed. This needs to be done 5779 * Clear RX alarm since it stays up as long as the mod_abs
5672 before calling the module detection, otherwise it will clear 5780 * wasn't changed. This needs to be done before calling the
5673 the link update alarm */ 5781 * module detection, otherwise it will clear the link update
5782 * alarm
5783 */
5674 bnx2x_cl45_read(bp, phy, 5784 bnx2x_cl45_read(bp, phy,
5675 MDIO_PMA_DEVAD, 5785 MDIO_PMA_DEVAD,
5676 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); 5786 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5678,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5678 5788
5679 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 5789 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
5680 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 5790 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
5681 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 5791 bnx2x_sfp_set_transmitter(params, phy, 0);
5682 5792
5683 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) 5793 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
5684 bnx2x_sfp_module_detection(phy, params); 5794 bnx2x_sfp_module_detection(phy, params);
@@ -5687,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
5687 } 5797 }
5688 5798
5689 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", 5799 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
5690 rx_alarm_status); 5800 rx_alarm_status);
5691 /* No need to check link status in case of 5801 /* No need to check link status in case of module plugged in/out */
5692 module plugged in/out */
5693} 5802}
5694 5803
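The bit-8/bit-9 handling in bnx2x_8727_handle_mod_abs above can be summarized as one small helper; this is a sketch only (the helper does not exist in the driver), using the same FLAGS_NOC check and PHY_IDENTIFIER register as the hunks:

static void bnx2x_8727_arm_mod_abs(struct bnx2x *bp, struct bnx2x_phy *phy,
				   u16 mod_abs, u8 module_present)
{
	/* Bit 8 arms detection of the next MOD_ABS event; bit 9 selects the
	 * OPRXLOS polarity and is only touched when FLAGS_NOC is not set. */
	if (module_present) {
		mod_abs |= (1 << 8);
		if (!(phy->flags & FLAGS_NOC))
			mod_abs |= (1 << 9);
	} else {
		mod_abs &= ~(1 << 8);
		if (!(phy->flags & FLAGS_NOC))
			mod_abs &= ~(1 << 9);
	}
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
			 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
}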
5695static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, 5804static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5725,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5725 bnx2x_cl45_read(bp, phy, 5834 bnx2x_cl45_read(bp, phy,
5726 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); 5835 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
5727 5836
5728 /** 5837 /*
5729 * If a module is present and there is need to check 5838 * If a module is present and there is need to check
5730 * for over current 5839 * for over current
5731 */ 5840 */
@@ -5745,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5745 " Please remove the SFP+ module and" 5854 " Please remove the SFP+ module and"
5746 " restart the system to clear this" 5855 " restart the system to clear this"
5747 " error.\n", 5856 " error.\n",
5748 params->port); 5857 params->port);
5749 5858 /* Disable all RX_ALARMs except for mod_abs */
5750 /*
5751 * Disable all RX_ALARMs except for
5752 * mod_abs
5753 */
5754 bnx2x_cl45_write(bp, phy, 5859 bnx2x_cl45_write(bp, phy,
5755 MDIO_PMA_DEVAD, 5860 MDIO_PMA_DEVAD,
5756 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5)); 5861 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5793,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5793 MDIO_PMA_DEVAD, 5898 MDIO_PMA_DEVAD,
5794 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); 5899 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
5795 5900
5796 /* Bits 0..2 --> speed detected, 5901 /*
5797 bits 13..15--> link is down */ 5902 * Bits 0..2 --> speed detected,
5903 * Bits 13..15--> link is down
5904 */
5798 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { 5905 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
5799 link_up = 1; 5906 link_up = 1;
5800 vars->line_speed = SPEED_10000; 5907 vars->line_speed = SPEED_10000;
5908 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
5909 params->port);
5801 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { 5910 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
5802 link_up = 1; 5911 link_up = 1;
5803 vars->line_speed = SPEED_1000; 5912 vars->line_speed = SPEED_1000;
@@ -5819,7 +5928,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
5819 bnx2x_cl45_read(bp, phy, 5928 bnx2x_cl45_read(bp, phy,
5820 MDIO_PMA_DEVAD, 5929 MDIO_PMA_DEVAD,
5821 MDIO_PMA_REG_8727_PCS_GP, &val1); 5930 MDIO_PMA_REG_8727_PCS_GP, &val1);
5822 /** 5931 /*
5823 * In case of dual-media board and 1G, power up the XAUI side, 5932 * In case of dual-media board and 1G, power up the XAUI side,
5824 * otherwise power it down. For 10G it is done automatically 5933 * otherwise power it down. For 10G it is done automatically
5825 */ 5934 */
@@ -5839,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
5839{ 5948{
5840 struct bnx2x *bp = params->bp; 5949 struct bnx2x *bp = params->bp;
5841 /* Disable Transmitter */ 5950 /* Disable Transmitter */
5842 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); 5951 bnx2x_sfp_set_transmitter(params, phy, 0);
5843 /* Clear LASI */ 5952 /* Clear LASI */
5844 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0); 5953 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
5845 5954
@@ -5851,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
5851static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, 5960static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5852 struct link_params *params) 5961 struct link_params *params)
5853{ 5962{
5854 u16 val, fw_ver1, fw_ver2, cnt; 5963 u16 val, fw_ver1, fw_ver2, cnt, adj;
5855 struct bnx2x *bp = params->bp; 5964 struct bnx2x *bp = params->bp;
5856 5965
5966 adj = 0;
5967 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
5968 adj = -1;
5969
5857 /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ 5970 /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
5858 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 5971 /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
5859 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); 5972 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
5860 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); 5973 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
5861 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); 5974 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
5862 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); 5975 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
5863 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); 5976 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
5864 5977
5865 for (cnt = 0; cnt < 100; cnt++) { 5978 for (cnt = 0; cnt < 100; cnt++) {
5866 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); 5979 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
5867 if (val & 1) 5980 if (val & 1)
5868 break; 5981 break;
5869 udelay(5); 5982 udelay(5);
@@ -5877,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5877 5990
5878 5991
5879 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ 5992 /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
5880 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); 5993 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
5881 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); 5994 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
5882 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); 5995 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
5883 for (cnt = 0; cnt < 100; cnt++) { 5996 for (cnt = 0; cnt < 100; cnt++) {
5884 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); 5997 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
5885 if (val & 1) 5998 if (val & 1)
5886 break; 5999 break;
5887 udelay(5); 6000 udelay(5);
@@ -5894,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5894 } 6007 }
5895 6008
5896 /* lower 16 bits of the register SPI_FW_STATUS */ 6009 /* lower 16 bits of the register SPI_FW_STATUS */
5897 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); 6010 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
5898 /* upper 16 bits of register SPI_FW_STATUS */ 6011 /* upper 16 bits of register SPI_FW_STATUS */
5899 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); 6012 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
5900 6013
5901 bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1, 6014 bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
5902 phy->ver_addr); 6015 phy->ver_addr);
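The +adj arithmetic introduced above is the whole of the BCM84833 support in this function: the 84833 places these MDIO2ARM/SPI registers one address below their 848xx counterparts, so every 0xA817..0xA81C access gets the same offset. A sketch of that pattern (the helper name is an illustration; note that elsewhere, e.g. the MEDIA register in bnx2x_848x3_config_init, the adjustment is +3 rather than -1):

static u16 bnx2x_848xx_spi_reg(struct bnx2x_phy *phy, u16 reg)
{
	/* u16 adj = -1 in the hunk wraps to 0xFFFF, which is the same as
	 * subtracting one in 16-bit arithmetic. */
	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
		return reg - 1;
	return reg;
}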
@@ -5905,49 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
5905static void bnx2x_848xx_set_led(struct bnx2x *bp, 6018static void bnx2x_848xx_set_led(struct bnx2x *bp,
5906 struct bnx2x_phy *phy) 6019 struct bnx2x_phy *phy)
5907{ 6020{
5908 u16 val; 6021 u16 val, adj;
6022
6023 adj = 0;
6024 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
6025 adj = -1;
5909 6026
5910 /* PHYC_CTL_LED_CTL */ 6027 /* PHYC_CTL_LED_CTL */
5911 bnx2x_cl45_read(bp, phy, 6028 bnx2x_cl45_read(bp, phy,
5912 MDIO_PMA_DEVAD, 6029 MDIO_PMA_DEVAD,
5913 MDIO_PMA_REG_8481_LINK_SIGNAL, &val); 6030 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
5914 val &= 0xFE00; 6031 val &= 0xFE00;
5915 val |= 0x0092; 6032 val |= 0x0092;
5916 6033
5917 bnx2x_cl45_write(bp, phy, 6034 bnx2x_cl45_write(bp, phy,
5918 MDIO_PMA_DEVAD, 6035 MDIO_PMA_DEVAD,
5919 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 6036 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
5920 6037
5921 bnx2x_cl45_write(bp, phy, 6038 bnx2x_cl45_write(bp, phy,
5922 MDIO_PMA_DEVAD, 6039 MDIO_PMA_DEVAD,
5923 MDIO_PMA_REG_8481_LED1_MASK, 6040 MDIO_PMA_REG_8481_LED1_MASK + adj,
5924 0x80); 6041 0x80);
5925 6042
5926 bnx2x_cl45_write(bp, phy, 6043 bnx2x_cl45_write(bp, phy,
5927 MDIO_PMA_DEVAD, 6044 MDIO_PMA_DEVAD,
5928 MDIO_PMA_REG_8481_LED2_MASK, 6045 MDIO_PMA_REG_8481_LED2_MASK + adj,
5929 0x18); 6046 0x18);
5930 6047
5931 /* Select activity source by Tx and Rx, as suggested by PHY AE */ 6048 /* Select activity source by Tx and Rx, as suggested by PHY AE */
5932 bnx2x_cl45_write(bp, phy, 6049 bnx2x_cl45_write(bp, phy,
5933 MDIO_PMA_DEVAD, 6050 MDIO_PMA_DEVAD,
5934 MDIO_PMA_REG_8481_LED3_MASK, 6051 MDIO_PMA_REG_8481_LED3_MASK + adj,
5935 0x0006); 6052 0x0006);
5936 6053
5937 /* Select the closest activity blink rate to that in 10/100/1000 */ 6054 /* Select the closest activity blink rate to that in 10/100/1000 */
5938 bnx2x_cl45_write(bp, phy, 6055 bnx2x_cl45_write(bp, phy,
5939 MDIO_PMA_DEVAD, 6056 MDIO_PMA_DEVAD,
5940 MDIO_PMA_REG_8481_LED3_BLINK, 6057 MDIO_PMA_REG_8481_LED3_BLINK + adj,
5941 0); 6058 0);
5942 6059
5943 bnx2x_cl45_read(bp, phy, 6060 bnx2x_cl45_read(bp, phy,
5944 MDIO_PMA_DEVAD, 6061 MDIO_PMA_DEVAD,
5945 MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); 6062 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
5946 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ 6063 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
5947 6064
5948 bnx2x_cl45_write(bp, phy, 6065 bnx2x_cl45_write(bp, phy,
5949 MDIO_PMA_DEVAD, 6066 MDIO_PMA_DEVAD,
5950 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); 6067 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
5951 6068
5952 /* 'Interrupt Mask' */ 6069 /* 'Interrupt Mask' */
5953 bnx2x_cl45_write(bp, phy, 6070 bnx2x_cl45_write(bp, phy,
@@ -5961,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
5961{ 6078{
5962 struct bnx2x *bp = params->bp; 6079 struct bnx2x *bp = params->bp;
5963 u16 autoneg_val, an_1000_val, an_10_100_val; 6080 u16 autoneg_val, an_1000_val, an_10_100_val;
5964 6081 /*
6082 * This phy uses the NIG latch mechanism since link indication
6083 * arrives through its LED4 and not via its LASI signal, so we
6084 * get steady signal instead of clear on read
6085 */
5965 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 6086 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
5966 1 << NIG_LATCH_BC_ENABLE_MI_INT); 6087 1 << NIG_LATCH_BC_ENABLE_MI_INT);
5967 6088
@@ -6086,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
6086 struct bnx2x *bp = params->bp; 6207 struct bnx2x *bp = params->bp;
6087 /* Restore normal power mode*/ 6208 /* Restore normal power mode*/
6088 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 6209 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6089 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 6210 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
6090 6211
6091 /* HW reset */ 6212 /* HW reset */
6092 bnx2x_ext_phy_hw_reset(bp, params->port); 6213 bnx2x_ext_phy_hw_reset(bp, params->port);
6093 bnx2x_wait_reset_complete(bp, phy); 6214 bnx2x_wait_reset_complete(bp, phy, params);
6094 6215
6095 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 6216 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
6096 return bnx2x_848xx_cmn_config_init(phy, params, vars); 6217 return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6102,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6102{ 6223{
6103 struct bnx2x *bp = params->bp; 6224 struct bnx2x *bp = params->bp;
6104 u8 port, initialize = 1; 6225 u8 port, initialize = 1;
6105 u16 val; 6226 u16 val, adj;
6106 u16 temp; 6227 u16 temp;
6107 u32 actual_phy_selection; 6228 u32 actual_phy_selection, cms_enable;
6108 u8 rc = 0; 6229 u8 rc = 0;
6109 6230
6110 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ 6231 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
6232 adj = 0;
6233 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
6234 adj = 3;
6111 6235
6112 msleep(1); 6236 msleep(1);
6113 if (CHIP_IS_E2(bp)) 6237 if (CHIP_IS_E2(bp))
@@ -6117,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6117 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 6241 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6118 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 6242 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
6119 port); 6243 port);
6120 bnx2x_wait_reset_complete(bp, phy); 6244 bnx2x_wait_reset_complete(bp, phy, params);
6121 /* Wait for GPHY to come out of reset */ 6245 /* Wait for GPHY to come out of reset */
6122 msleep(50); 6246 msleep(50);
6123 /* BCM84823 requires that XGXS links up first @ 10G for normal 6247 /*
6124 behavior */ 6248 * BCM84823 requires that XGXS links up first @ 10G for normal behavior
6249 */
6125 temp = vars->line_speed; 6250 temp = vars->line_speed;
6126 vars->line_speed = SPEED_10000; 6251 vars->line_speed = SPEED_10000;
6127 bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0); 6252 bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6131,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6131 /* Set dual-media configuration according to configuration */ 6256 /* Set dual-media configuration according to configuration */
6132 6257
6133 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 6258 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
6134 MDIO_CTL_REG_84823_MEDIA, &val); 6259 MDIO_CTL_REG_84823_MEDIA + adj, &val);
6135 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | 6260 val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
6136 MDIO_CTL_REG_84823_MEDIA_LINE_MASK | 6261 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
6137 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | 6262 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6164,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6164 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; 6289 val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
6165 6290
6166 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 6291 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
6167 MDIO_CTL_REG_84823_MEDIA, val); 6292 MDIO_CTL_REG_84823_MEDIA + adj, val);
6168 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", 6293 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
6169 params->multi_phy_config, val); 6294 params->multi_phy_config, val);
6170 6295
@@ -6172,23 +6297,43 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
6172 rc = bnx2x_848xx_cmn_config_init(phy, params, vars); 6297 rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
6173 else 6298 else
6174 bnx2x_save_848xx_spirom_version(phy, params); 6299 bnx2x_save_848xx_spirom_version(phy, params);
6300 cms_enable = REG_RD(bp, params->shmem_base +
6301 offsetof(struct shmem_region,
6302 dev_info.port_hw_config[params->port].default_cfg)) &
6303 PORT_HW_CFG_ENABLE_CMS_MASK;
6304
6305 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
6306 MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
6307 if (cms_enable)
6308 val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
6309 else
6310 val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
6311 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
6312 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
6313
6314
6175 return rc; 6315 return rc;
6176} 6316}
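The cms_enable lookup above is the recurring per-port shmem read used throughout this file; a compact sketch of that pattern with an illustrative helper name (REG_RD, shmem_region and default_cfg are taken from the hunk itself):

static u32 bnx2x_port_default_cfg(struct bnx2x *bp, struct link_params *params)
{
	return REG_RD(bp, params->shmem_base +
		      offsetof(struct shmem_region,
			       dev_info.port_hw_config[params->port].default_cfg));
}

/* Usage, matching the hunk above:
 * cms_enable = bnx2x_port_default_cfg(bp, params) & PORT_HW_CFG_ENABLE_CMS_MASK;
 */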
6177 6317
6178static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, 6318static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
6179 struct link_params *params, 6319 struct link_params *params,
6180 struct link_vars *vars) 6320 struct link_vars *vars)
6181{ 6321{
6182 struct bnx2x *bp = params->bp; 6322 struct bnx2x *bp = params->bp;
6183 u16 val, val1, val2; 6323 u16 val, val1, val2, adj;
6184 u8 link_up = 0; 6324 u8 link_up = 0;
6185 6325
6326 /* Reg offset adjustment for 84833 */
6327 adj = 0;
6328 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
6329 adj = -1;
6330
6186 /* Check 10G-BaseT link status */ 6331 /* Check 10G-BaseT link status */
6187 /* Check PMD signal ok */ 6332 /* Check PMD signal ok */
6188 bnx2x_cl45_read(bp, phy, 6333 bnx2x_cl45_read(bp, phy,
6189 MDIO_AN_DEVAD, 0xFFFA, &val1); 6334 MDIO_AN_DEVAD, 0xFFFA, &val1);
6190 bnx2x_cl45_read(bp, phy, 6335 bnx2x_cl45_read(bp, phy,
6191 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, 6336 MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
6192 &val2); 6337 &val2);
6193 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); 6338 DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
6194 6339
@@ -6273,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
6273 struct link_params *params) 6418 struct link_params *params)
6274{ 6419{
6275 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6420 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6276 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); 6421 MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
6277 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6422 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6278 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); 6423 MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
6279} 6424}
6280 6425
6281static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, 6426static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6297,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
6297 else 6442 else
6298 port = params->port; 6443 port = params->port;
6299 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 6444 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6300 MISC_REGISTERS_GPIO_OUTPUT_LOW, 6445 MISC_REGISTERS_GPIO_OUTPUT_LOW,
6301 port); 6446 port);
6302} 6447}
6303 6448
6304static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, 6449static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6353,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6353 6498
6354 /* Set LED masks */ 6499 /* Set LED masks */
6355 bnx2x_cl45_write(bp, phy, 6500 bnx2x_cl45_write(bp, phy,
6356 MDIO_PMA_DEVAD, 6501 MDIO_PMA_DEVAD,
6357 MDIO_PMA_REG_8481_LED1_MASK, 6502 MDIO_PMA_REG_8481_LED1_MASK,
6358 0x0); 6503 0x0);
6359 6504
6360 bnx2x_cl45_write(bp, phy, 6505 bnx2x_cl45_write(bp, phy,
6361 MDIO_PMA_DEVAD, 6506 MDIO_PMA_DEVAD,
6362 MDIO_PMA_REG_8481_LED2_MASK, 6507 MDIO_PMA_REG_8481_LED2_MASK,
6363 0x0); 6508 0x0);
6364 6509
6365 bnx2x_cl45_write(bp, phy, 6510 bnx2x_cl45_write(bp, phy,
6366 MDIO_PMA_DEVAD, 6511 MDIO_PMA_DEVAD,
6367 MDIO_PMA_REG_8481_LED3_MASK, 6512 MDIO_PMA_REG_8481_LED3_MASK,
6368 0x0); 6513 0x0);
6369 6514
6370 bnx2x_cl45_write(bp, phy, 6515 bnx2x_cl45_write(bp, phy,
6371 MDIO_PMA_DEVAD, 6516 MDIO_PMA_DEVAD,
6372 MDIO_PMA_REG_8481_LED5_MASK, 6517 MDIO_PMA_REG_8481_LED5_MASK,
6373 0x20); 6518 0x20);
6374 6519
6375 } else { 6520 } else {
6376 bnx2x_cl45_write(bp, phy, 6521 bnx2x_cl45_write(bp, phy,
@@ -6394,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6394 val |= 0x2492; 6539 val |= 0x2492;
6395 6540
6396 bnx2x_cl45_write(bp, phy, 6541 bnx2x_cl45_write(bp, phy,
6397 MDIO_PMA_DEVAD, 6542 MDIO_PMA_DEVAD,
6398 MDIO_PMA_REG_8481_LINK_SIGNAL, 6543 MDIO_PMA_REG_8481_LINK_SIGNAL,
6399 val); 6544 val);
6400 6545
6401 /* Set LED masks */ 6546 /* Set LED masks */
6402 bnx2x_cl45_write(bp, phy, 6547 bnx2x_cl45_write(bp, phy,
6403 MDIO_PMA_DEVAD, 6548 MDIO_PMA_DEVAD,
6404 MDIO_PMA_REG_8481_LED1_MASK, 6549 MDIO_PMA_REG_8481_LED1_MASK,
6405 0x0); 6550 0x0);
6406 6551
6407 bnx2x_cl45_write(bp, phy, 6552 bnx2x_cl45_write(bp, phy,
6408 MDIO_PMA_DEVAD, 6553 MDIO_PMA_DEVAD,
6409 MDIO_PMA_REG_8481_LED2_MASK, 6554 MDIO_PMA_REG_8481_LED2_MASK,
6410 0x20); 6555 0x20);
6411 6556
6412 bnx2x_cl45_write(bp, phy, 6557 bnx2x_cl45_write(bp, phy,
6413 MDIO_PMA_DEVAD, 6558 MDIO_PMA_DEVAD,
6414 MDIO_PMA_REG_8481_LED3_MASK, 6559 MDIO_PMA_REG_8481_LED3_MASK,
6415 0x20); 6560 0x20);
6416 6561
6417 bnx2x_cl45_write(bp, phy, 6562 bnx2x_cl45_write(bp, phy,
6418 MDIO_PMA_DEVAD, 6563 MDIO_PMA_DEVAD,
6419 MDIO_PMA_REG_8481_LED5_MASK, 6564 MDIO_PMA_REG_8481_LED5_MASK,
6420 0x0); 6565 0x0);
6421 } else { 6566 } else {
6422 bnx2x_cl45_write(bp, phy, 6567 bnx2x_cl45_write(bp, phy,
6423 MDIO_PMA_DEVAD, 6568 MDIO_PMA_DEVAD,
6424 MDIO_PMA_REG_8481_LED1_MASK, 6569 MDIO_PMA_REG_8481_LED1_MASK,
6425 0x20); 6570 0x20);
6426 } 6571 }
6427 break; 6572 break;
6428 6573
@@ -6440,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6440 &val); 6585 &val);
6441 6586
6442 if (!((val & 6587 if (!((val &
6443 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) 6588 MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
6444 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){ 6589 >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
6445 DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n"); 6590 DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
6446 bnx2x_cl45_write(bp, phy, 6591 bnx2x_cl45_write(bp, phy,
6447 MDIO_PMA_DEVAD, 6592 MDIO_PMA_DEVAD,
6448 MDIO_PMA_REG_8481_LINK_SIGNAL, 6593 MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6451,24 +6596,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
6451 6596
6452 /* Set LED masks */ 6597 /* Set LED masks */
6453 bnx2x_cl45_write(bp, phy, 6598 bnx2x_cl45_write(bp, phy,
6454 MDIO_PMA_DEVAD, 6599 MDIO_PMA_DEVAD,
6455 MDIO_PMA_REG_8481_LED1_MASK, 6600 MDIO_PMA_REG_8481_LED1_MASK,
6456 0x10); 6601 0x10);
6457 6602
6458 bnx2x_cl45_write(bp, phy, 6603 bnx2x_cl45_write(bp, phy,
6459 MDIO_PMA_DEVAD, 6604 MDIO_PMA_DEVAD,
6460 MDIO_PMA_REG_8481_LED2_MASK, 6605 MDIO_PMA_REG_8481_LED2_MASK,
6461 0x80); 6606 0x80);
6462 6607
6463 bnx2x_cl45_write(bp, phy, 6608 bnx2x_cl45_write(bp, phy,
6464 MDIO_PMA_DEVAD, 6609 MDIO_PMA_DEVAD,
6465 MDIO_PMA_REG_8481_LED3_MASK, 6610 MDIO_PMA_REG_8481_LED3_MASK,
6466 0x98); 6611 0x98);
6467 6612
6468 bnx2x_cl45_write(bp, phy, 6613 bnx2x_cl45_write(bp, phy,
6469 MDIO_PMA_DEVAD, 6614 MDIO_PMA_DEVAD,
6470 MDIO_PMA_REG_8481_LED5_MASK, 6615 MDIO_PMA_REG_8481_LED5_MASK,
6471 0x40); 6616 0x40);
6472 6617
6473 } else { 6618 } else {
6474 bnx2x_cl45_write(bp, phy, 6619 bnx2x_cl45_write(bp, phy,
@@ -6513,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
6513 6658
6514 /* Restore normal power mode*/ 6659 /* Restore normal power mode*/
6515 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 6660 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
6516 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); 6661 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
6517 /* HW reset */ 6662 /* HW reset */
6518 bnx2x_ext_phy_hw_reset(bp, params->port); 6663 bnx2x_ext_phy_hw_reset(bp, params->port);
6519 bnx2x_wait_reset_complete(bp, phy); 6664 bnx2x_wait_reset_complete(bp, phy, params);
6520 6665
6521 bnx2x_cl45_write(bp, phy, 6666 bnx2x_cl45_write(bp, phy,
6522 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1); 6667 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6563,9 +6708,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
6563 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", 6708 DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
6564 val2, val1); 6709 val2, val1);
6565 link_up = ((val1 & 4) == 4); 6710 link_up = ((val1 & 4) == 4);
6566 /* if link is up 6711 /* if link is up print the AN outcome of the SFX7101 PHY */
6567 * print the AN outcome of the SFX7101 PHY
6568 */
6569 if (link_up) { 6712 if (link_up) {
6570 bnx2x_cl45_read(bp, phy, 6713 bnx2x_cl45_read(bp, phy,
6571 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, 6714 MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -6599,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
6599 u16 val, cnt; 6742 u16 val, cnt;
6600 6743
6601 bnx2x_cl45_read(bp, phy, 6744 bnx2x_cl45_read(bp, phy,
6602 MDIO_PMA_DEVAD, 6745 MDIO_PMA_DEVAD,
6603 MDIO_PMA_REG_7101_RESET, &val); 6746 MDIO_PMA_REG_7101_RESET, &val);
6604 6747
6605 for (cnt = 0; cnt < 10; cnt++) { 6748 for (cnt = 0; cnt < 10; cnt++) {
6606 msleep(50); 6749 msleep(50);
6607 /* Writes a self-clearing reset */ 6750 /* Writes a self-clearing reset */
6608 bnx2x_cl45_write(bp, phy, 6751 bnx2x_cl45_write(bp, phy,
6609 MDIO_PMA_DEVAD, 6752 MDIO_PMA_DEVAD,
6610 MDIO_PMA_REG_7101_RESET, 6753 MDIO_PMA_REG_7101_RESET,
6611 (val | (1<<15))); 6754 (val | (1<<15)));
6612 /* Wait for clear */ 6755 /* Wait for clear */
6613 bnx2x_cl45_read(bp, phy, 6756 bnx2x_cl45_read(bp, phy,
6614 MDIO_PMA_DEVAD, 6757 MDIO_PMA_DEVAD,
6615 MDIO_PMA_REG_7101_RESET, &val); 6758 MDIO_PMA_REG_7101_RESET, &val);
6616 6759
6617 if ((val & (1<<15)) == 0) 6760 if ((val & (1<<15)) == 0)
6618 break; 6761 break;
@@ -6623,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
6623 struct link_params *params) { 6766 struct link_params *params) {
6624 /* Low power mode is controlled by GPIO 2 */ 6767 /* Low power mode is controlled by GPIO 2 */
6625 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, 6768 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
6626 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 6769 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6627 /* The PHY reset is controlled by GPIO 1 */ 6770 /* The PHY reset is controlled by GPIO 1 */
6628 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, 6771 bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
6629 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); 6772 MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
6630} 6773}
6631 6774
6632static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, 6775static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6668,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
6668 .supported = 0, 6811 .supported = 0,
6669 .media_type = ETH_PHY_NOT_PRESENT, 6812 .media_type = ETH_PHY_NOT_PRESENT,
6670 .ver_addr = 0, 6813 .ver_addr = 0,
6671 .req_flow_ctrl = 0, 6814 .req_flow_ctrl = 0,
6672 .req_line_speed = 0, 6815 .req_line_speed = 0,
6673 .speed_cap_mask = 0, 6816 .speed_cap_mask = 0,
6674 .req_duplex = 0, 6817 .req_duplex = 0,
6675 .rsrv = 0, 6818 .rsrv = 0,
6676 .config_init = (config_init_t)NULL, 6819 .config_init = (config_init_t)NULL,
@@ -6705,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
6705 .media_type = ETH_PHY_UNSPECIFIED, 6848 .media_type = ETH_PHY_UNSPECIFIED,
6706 .ver_addr = 0, 6849 .ver_addr = 0,
6707 .req_flow_ctrl = 0, 6850 .req_flow_ctrl = 0,
6708 .req_line_speed = 0, 6851 .req_line_speed = 0,
6709 .speed_cap_mask = 0, 6852 .speed_cap_mask = 0,
6710 .req_duplex = 0, 6853 .req_duplex = 0,
6711 .rsrv = 0, 6854 .rsrv = 0,
6712 .config_init = (config_init_t)bnx2x_init_serdes, 6855 .config_init = (config_init_t)bnx2x_init_serdes,
@@ -6742,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
6742 .media_type = ETH_PHY_UNSPECIFIED, 6885 .media_type = ETH_PHY_UNSPECIFIED,
6743 .ver_addr = 0, 6886 .ver_addr = 0,
6744 .req_flow_ctrl = 0, 6887 .req_flow_ctrl = 0,
6745 .req_line_speed = 0, 6888 .req_line_speed = 0,
6746 .speed_cap_mask = 0, 6889 .speed_cap_mask = 0,
6747 .req_duplex = 0, 6890 .req_duplex = 0,
6748 .rsrv = 0, 6891 .rsrv = 0,
6749 .config_init = (config_init_t)bnx2x_init_xgxs, 6892 .config_init = (config_init_t)bnx2x_init_xgxs,
@@ -6773,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
6773 .media_type = ETH_PHY_BASE_T, 6916 .media_type = ETH_PHY_BASE_T,
6774 .ver_addr = 0, 6917 .ver_addr = 0,
6775 .req_flow_ctrl = 0, 6918 .req_flow_ctrl = 0,
6776 .req_line_speed = 0, 6919 .req_line_speed = 0,
6777 .speed_cap_mask = 0, 6920 .speed_cap_mask = 0,
6778 .req_duplex = 0, 6921 .req_duplex = 0,
6779 .rsrv = 0, 6922 .rsrv = 0,
6780 .config_init = (config_init_t)bnx2x_7101_config_init, 6923 .config_init = (config_init_t)bnx2x_7101_config_init,
@@ -6804,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
6804 SUPPORTED_Asym_Pause), 6947 SUPPORTED_Asym_Pause),
6805 .media_type = ETH_PHY_UNSPECIFIED, 6948 .media_type = ETH_PHY_UNSPECIFIED,
6806 .ver_addr = 0, 6949 .ver_addr = 0,
6807 .req_flow_ctrl = 0, 6950 .req_flow_ctrl = 0,
6808 .req_line_speed = 0, 6951 .req_line_speed = 0,
6809 .speed_cap_mask = 0, 6952 .speed_cap_mask = 0,
6810 .req_duplex = 0, 6953 .req_duplex = 0,
6811 .rsrv = 0, 6954 .rsrv = 0,
6812 .config_init = (config_init_t)bnx2x_8073_config_init, 6955 .config_init = (config_init_t)bnx2x_8073_config_init,
@@ -7015,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
7015 .phy_specific_func = (phy_specific_func_t)NULL 7158 .phy_specific_func = (phy_specific_func_t)NULL
7016}; 7159};
7017 7160
7161static struct bnx2x_phy phy_84833 = {
7162 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
7163 .addr = 0xff,
7164 .flags = FLAGS_FAN_FAILURE_DET_REQ |
7165 FLAGS_REARM_LATCH_SIGNAL,
7166 .def_md_devad = 0,
7167 .reserved = 0,
7168 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7169 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
7170 .mdio_ctrl = 0,
7171 .supported = (SUPPORTED_10baseT_Half |
7172 SUPPORTED_10baseT_Full |
7173 SUPPORTED_100baseT_Half |
7174 SUPPORTED_100baseT_Full |
7175 SUPPORTED_1000baseT_Full |
7176 SUPPORTED_10000baseT_Full |
7177 SUPPORTED_TP |
7178 SUPPORTED_Autoneg |
7179 SUPPORTED_Pause |
7180 SUPPORTED_Asym_Pause),
7181 .media_type = ETH_PHY_BASE_T,
7182 .ver_addr = 0,
7183 .req_flow_ctrl = 0,
7184 .req_line_speed = 0,
7185 .speed_cap_mask = 0,
7186 .req_duplex = 0,
7187 .rsrv = 0,
7188 .config_init = (config_init_t)bnx2x_848x3_config_init,
7189 .read_status = (read_status_t)bnx2x_848xx_read_status,
7190 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
7191 .config_loopback = (config_loopback_t)NULL,
7192 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
7193 .hw_reset = (hw_reset_t)NULL,
7194 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
7195 .phy_specific_func = (phy_specific_func_t)NULL
7196};
7197
7018/*****************************************************************/ 7198/*****************************************************************/
7019/* */ 7199/* */
7020/* Populate the phy according to the configuration. Main function: bnx2x_populate_phy */ 7200
@@ -7028,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
7028 /* Get the 4 lanes xgxs config rx and tx */ 7208 /* Get the 4 lanes xgxs config rx and tx */
7029 u32 rx = 0, tx = 0, i; 7209 u32 rx = 0, tx = 0, i;
7030 for (i = 0; i < 2; i++) { 7210 for (i = 0; i < 2; i++) {
7031 /** 7211 /*
7032 * INT_PHY and EXT_PHY1 share the same value location in the 7212 * INT_PHY and EXT_PHY1 share the same value location in the
7033 * shmem. When num_phys is greater than 1, than this value 7213 * shmem. When num_phys is greater than 1, than this value
7034 * applies only to EXT_PHY1 7214 * applies only to EXT_PHY1
@@ -7036,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
7036 if (phy_index == INT_PHY || phy_index == EXT_PHY1) { 7216 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
7037 rx = REG_RD(bp, shmem_base + 7217 rx = REG_RD(bp, shmem_base +
7038 offsetof(struct shmem_region, 7218 offsetof(struct shmem_region,
7039 dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); 7219 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
7040 7220
7041 tx = REG_RD(bp, shmem_base + 7221 tx = REG_RD(bp, shmem_base +
7042 offsetof(struct shmem_region, 7222 offsetof(struct shmem_region,
7043 dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); 7223 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
7044 } else { 7224 } else {
7045 rx = REG_RD(bp, shmem_base + 7225 rx = REG_RD(bp, shmem_base +
7046 offsetof(struct shmem_region, 7226 offsetof(struct shmem_region,
7047 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); 7227 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
7048 7228
7049 tx = REG_RD(bp, shmem_base + 7229 tx = REG_RD(bp, shmem_base +
7050 offsetof(struct shmem_region, 7230 offsetof(struct shmem_region,
7051 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); 7231 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
7052 } 7232 }
7053 7233
7054 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff); 7234 phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7168,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7168 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: 7348 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
7169 *phy = phy_84823; 7349 *phy = phy_84823;
7170 break; 7350 break;
7351 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
7352 *phy = phy_84833;
7353 break;
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 7354 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7172 *phy = phy_7101; 7355 *phy = phy_7101;
7173 break; 7356 break;
@@ -7182,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7182 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); 7365 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
7183 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); 7366 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
7184 7367
7185 /** 7368 /*
7186 * The shmem address of the phy version is located on different 7369 * The shmem address of the phy version is located on different
7187 * structures. In case this structure is too old, do not set 7370 * structures. In case this structure is too old, do not set
7188 * the address 7371 * the address
7189 */ 7372 */
7190 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, 7373 config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
7191 dev_info.shared_hw_config.config2)); 7374 dev_info.shared_hw_config.config2));
7192 if (phy_index == EXT_PHY1) { 7375 if (phy_index == EXT_PHY1) {
7193 phy->ver_addr = shmem_base + offsetof(struct shmem_region, 7376 phy->ver_addr = shmem_base + offsetof(struct shmem_region,
7194 port_mb[port].ext_phy_fw_version); 7377 port_mb[port].ext_phy_fw_version);
7195 7378
7196 /* Check specific mdc mdio settings */ 7379 /* Check specific mdc mdio settings */
7197 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) 7380 if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
7198 mdc_mdio_access = config2 & 7381 mdc_mdio_access = config2 &
7199 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; 7382 SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
7200 } else { 7383 } else {
7201 u32 size = REG_RD(bp, shmem2_base); 7384 u32 size = REG_RD(bp, shmem2_base);
7202 7385
@@ -7215,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
7215 } 7398 }
7216 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); 7399 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
7217 7400
7218 /** 7401 /*
7219 * In case mdc/mdio_access of the external phy is different than the 7402 * In case mdc/mdio_access of the external phy is different than the
7220 * mdc/mdio access of the XGXS, a HW lock must be taken in each access 7403 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
7221 * to prevent one port from interfering with another port's CL45 operations. 7404
@@ -7250,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
7250 /* Populate the default phy configuration for MF mode */ 7433 /* Populate the default phy configuration for MF mode */
7251 if (phy_index == EXT_PHY2) { 7434 if (phy_index == EXT_PHY2) {
7252 link_config = REG_RD(bp, params->shmem_base + 7435 link_config = REG_RD(bp, params->shmem_base +
7253 offsetof(struct shmem_region, dev_info. 7436 offsetof(struct shmem_region, dev_info.
7254 port_feature_config[params->port].link_config2)); 7437 port_feature_config[params->port].link_config2));
7255 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7438 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7256 offsetof(struct shmem_region, dev_info. 7439 offsetof(struct shmem_region,
7440 dev_info.
7257 port_hw_config[params->port].speed_capability_mask2)); 7441 port_hw_config[params->port].speed_capability_mask2));
7258 } else { 7442 } else {
7259 link_config = REG_RD(bp, params->shmem_base + 7443 link_config = REG_RD(bp, params->shmem_base +
7260 offsetof(struct shmem_region, dev_info. 7444 offsetof(struct shmem_region, dev_info.
7261 port_feature_config[params->port].link_config)); 7445 port_feature_config[params->port].link_config));
7262 phy->speed_cap_mask = REG_RD(bp, params->shmem_base + 7446 phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
7263 offsetof(struct shmem_region, dev_info. 7447 offsetof(struct shmem_region,
7264 port_hw_config[params->port].speed_capability_mask)); 7448 dev_info.
7449 port_hw_config[params->port].speed_capability_mask));
7265 } 7450 }
7266 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask" 7451 DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
7267 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask); 7452 " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7408,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
7408 else if (phy_index == EXT_PHY2) 7593 else if (phy_index == EXT_PHY2)
7409 actual_phy_idx = EXT_PHY1; 7594 actual_phy_idx = EXT_PHY1;
7410 } 7595 }
7411 params->phy[actual_phy_idx].req_flow_ctrl = 7596 params->phy[actual_phy_idx].req_flow_ctrl =
7412 params->req_flow_ctrl[link_cfg_idx]; 7597 params->req_flow_ctrl[link_cfg_idx];
7413 7598
7414 params->phy[actual_phy_idx].req_line_speed = 7599 params->phy[actual_phy_idx].req_line_speed =
@@ -7461,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7461 set_phy_vars(params); 7646 set_phy_vars(params);
7462 7647
7463 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); 7648 DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
7464 if (CHIP_REV_IS_FPGA(bp)) {
7465
7466 vars->link_up = 1;
7467 vars->line_speed = SPEED_10000;
7468 vars->duplex = DUPLEX_FULL;
7469 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7470 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7471 /* enable on E1.5 FPGA */
7472 if (CHIP_IS_E1H(bp)) {
7473 vars->flow_ctrl |=
7474 (BNX2X_FLOW_CTRL_TX |
7475 BNX2X_FLOW_CTRL_RX);
7476 vars->link_status |=
7477 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
7478 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
7479 }
7480
7481 bnx2x_emac_enable(params, vars, 0);
7482 if (!(CHIP_IS_E2(bp)))
7483 bnx2x_pbf_update(params, vars->flow_ctrl,
7484 vars->line_speed);
7485 /* disable drain */
7486 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7487
7488 /* update shared memory */
7489 bnx2x_update_mng(params, vars->link_status);
7490
7491 return 0;
7492
7493 } else
7494 if (CHIP_REV_IS_EMUL(bp)) {
7495
7496 vars->link_up = 1;
7497 vars->line_speed = SPEED_10000;
7498 vars->duplex = DUPLEX_FULL;
7499 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7500 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
7501
7502 bnx2x_bmac_enable(params, vars, 0);
7503
7504 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
7505 /* Disable drain */
7506 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
7507 + params->port*4, 0);
7508
7509 /* update shared memory */
7510 bnx2x_update_mng(params, vars->link_status);
7511
7512 return 0;
7513
7514 } else
7515 if (params->loopback_mode == LOOPBACK_BMAC) { 7649 if (params->loopback_mode == LOOPBACK_BMAC) {
7516 7650
7517 vars->link_up = 1; 7651 vars->link_up = 1;
@@ -7527,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7527 /* set bmac loopback */ 7661 /* set bmac loopback */
7528 bnx2x_bmac_enable(params, vars, 1); 7662 bnx2x_bmac_enable(params, vars, 1);
7529 7663
7530 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7664 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7531 params->port*4, 0);
7532 7665
7533 } else if (params->loopback_mode == LOOPBACK_EMAC) { 7666 } else if (params->loopback_mode == LOOPBACK_EMAC) {
7534 7667
@@ -7544,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7544 /* set bmac loopback */ 7677 /* set bmac loopback */
7545 bnx2x_emac_enable(params, vars, 1); 7678 bnx2x_emac_enable(params, vars, 1);
7546 bnx2x_emac_program(params, vars); 7679 bnx2x_emac_program(params, vars);
7547 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 7680 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7548 params->port*4, 0);
7549 7681
7550 } else if ((params->loopback_mode == LOOPBACK_XGXS) || 7682 } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
7551 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 7683 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7568,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7568 bnx2x_emac_program(params, vars); 7700 bnx2x_emac_program(params, vars);
7569 bnx2x_emac_enable(params, vars, 0); 7701 bnx2x_emac_enable(params, vars, 0);
7570 } else 7702 } else
7571 bnx2x_bmac_enable(params, vars, 0); 7703 bnx2x_bmac_enable(params, vars, 0);
7572
7573 if (params->loopback_mode == LOOPBACK_XGXS) { 7704 if (params->loopback_mode == LOOPBACK_XGXS) {
7574 /* set 10G XGXS loopback */ 7705 /* set 10G XGXS loopback */
7575 params->phy[INT_PHY].config_loopback( 7706 params->phy[INT_PHY].config_loopback(
@@ -7587,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7587 params); 7718 params);
7588 } 7719 }
7589 } 7720 }
7590 7721 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
7591 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
7592 params->port*4, 0);
7593 7722
7594 bnx2x_set_led(params, vars, 7723 bnx2x_set_led(params, vars,
7595 LED_MODE_OPER, vars->line_speed); 7724 LED_MODE_OPER, vars->line_speed);
@@ -7608,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
7608 return 0; 7737 return 0;
7609} 7738}
7610u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, 7739u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7611 u8 reset_ext_phy) 7740 u8 reset_ext_phy)
7612{ 7741{
7613 struct bnx2x *bp = params->bp; 7742 struct bnx2x *bp = params->bp;
7614 u8 phy_index, port = params->port, clear_latch_ind = 0; 7743 u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7617,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7617 vars->link_status = 0; 7746 vars->link_status = 0;
7618 bnx2x_update_mng(params, vars->link_status); 7747 bnx2x_update_mng(params, vars->link_status);
7619 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7748 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
7620 (NIG_MASK_XGXS0_LINK_STATUS | 7749 (NIG_MASK_XGXS0_LINK_STATUS |
7621 NIG_MASK_XGXS0_LINK10G | 7750 NIG_MASK_XGXS0_LINK10G |
7622 NIG_MASK_SERDES0_LINK_STATUS | 7751 NIG_MASK_SERDES0_LINK_STATUS |
7623 NIG_MASK_MI_INT)); 7752 NIG_MASK_MI_INT));
7624 7753
7625 /* activate nig drain */ 7754 /* activate nig drain */
7626 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 7755 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7719,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7719 /* disable attentions */ 7848 /* disable attentions */
7720 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + 7849 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7721 port_of_path*4, 7850 port_of_path*4,
7722 (NIG_MASK_XGXS0_LINK_STATUS | 7851 (NIG_MASK_XGXS0_LINK_STATUS |
7723 NIG_MASK_XGXS0_LINK10G | 7852 NIG_MASK_XGXS0_LINK10G |
7724 NIG_MASK_SERDES0_LINK_STATUS | 7853 NIG_MASK_SERDES0_LINK_STATUS |
7725 NIG_MASK_MI_INT)); 7854 NIG_MASK_MI_INT));
7726 7855
7727 /* Need to take the phy out of low power mode in order 7856 /* Need to take the phy out of low power mode in order
7728 to write to its registers */ 7857 to write to its registers */
7729 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7858 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7730 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 7859 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
7860 port);
7731 7861
7732 /* Reset the phy */ 7862 /* Reset the phy */
7733 bnx2x_cl45_write(bp, &phy[port], 7863 bnx2x_cl45_write(bp, &phy[port],
7734 MDIO_PMA_DEVAD, 7864 MDIO_PMA_DEVAD,
7735 MDIO_PMA_REG_CTRL, 7865 MDIO_PMA_REG_CTRL,
7736 1<<15); 7866 1<<15);
7737 } 7867 }
7738 7868
7739 /* Add delay of 150ms after reset */ 7869 /* Add delay of 150ms after reset */
@@ -7762,18 +7892,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7762 7892
7763 /* Only set bit 10 = 1 (Tx power down) */ 7893 /* Only set bit 10 = 1 (Tx power down) */
7764 bnx2x_cl45_read(bp, phy_blk[port], 7894 bnx2x_cl45_read(bp, phy_blk[port],
7765 MDIO_PMA_DEVAD, 7895 MDIO_PMA_DEVAD,
7766 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7896 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7767 7897
7768 /* Phase1 of TX_POWER_DOWN reset */ 7898 /* Phase1 of TX_POWER_DOWN reset */
7769 bnx2x_cl45_write(bp, phy_blk[port], 7899 bnx2x_cl45_write(bp, phy_blk[port],
7770 MDIO_PMA_DEVAD, 7900 MDIO_PMA_DEVAD,
7771 MDIO_PMA_REG_TX_POWER_DOWN, 7901 MDIO_PMA_REG_TX_POWER_DOWN,
7772 (val | 1<<10)); 7902 (val | 1<<10));
7773 } 7903 }
7774 7904
7775 /* Toggle Transmitter: Power down and then up with 600ms 7905 /*
7776 delay between */ 7906 * Toggle Transmitter: Power down and then up with 600ms delay
7907 * between
7908 */
7777 msleep(600); 7909 msleep(600);
7778 7910
7779 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ 7911 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7781,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7781 /* Phase2 of POWER_DOWN_RESET */ 7913 /* Phase2 of POWER_DOWN_RESET */
7782 /* Release bit 10 (Release Tx power down) */ 7914 /* Release bit 10 (Release Tx power down) */
7783 bnx2x_cl45_read(bp, phy_blk[port], 7915 bnx2x_cl45_read(bp, phy_blk[port],
7784 MDIO_PMA_DEVAD, 7916 MDIO_PMA_DEVAD,
7785 MDIO_PMA_REG_TX_POWER_DOWN, &val); 7917 MDIO_PMA_REG_TX_POWER_DOWN, &val);
7786 7918
7787 bnx2x_cl45_write(bp, phy_blk[port], 7919 bnx2x_cl45_write(bp, phy_blk[port],
7788 MDIO_PMA_DEVAD, 7920 MDIO_PMA_DEVAD,
7789 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 7921 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
7790 msleep(15); 7922 msleep(15);
7791 7923
7792 /* Read modify write the SPI-ROM version select register */ 7924 /* Read modify write the SPI-ROM version select register */
7793 bnx2x_cl45_read(bp, phy_blk[port], 7925 bnx2x_cl45_read(bp, phy_blk[port],
7794 MDIO_PMA_DEVAD, 7926 MDIO_PMA_DEVAD,
7795 MDIO_PMA_REG_EDC_FFE_MAIN, &val); 7927 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
7796 bnx2x_cl45_write(bp, phy_blk[port], 7928 bnx2x_cl45_write(bp, phy_blk[port],
7797 MDIO_PMA_DEVAD, 7929 MDIO_PMA_DEVAD,
7798 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); 7930 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
7799 7931
7800 /* set GPIO2 back to LOW */ 7932 /* set GPIO2 back to LOW */
7801 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 7933 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
7802 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 7934 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
7803 } 7935 }
7804 return 0; 7936 return 0;
7805} 7937}
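Read together, the three parts above toggle the 8073 transmitter with the following sequence; this is a condensed per-phy sketch only — in the hunks the 600ms settle is done once between the two port loops, GPIO2 is raised and lowered around them, and phy_blk[port] is used to account for port swap. The delays and bit numbers are exactly those in the hunks; the helper itself is not part of the driver.

static void bnx2x_8073_toggle_tx_power(struct bnx2x *bp, struct bnx2x_phy *phy)
{
	u16 val;

	/* Phase 1: set bit 10 (Tx power down) */
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
			MDIO_PMA_REG_TX_POWER_DOWN, &val);
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
			 MDIO_PMA_REG_TX_POWER_DOWN, val | (1 << 10));
	msleep(600);
	/* Phase 2: release bit 10, then set bit 12 of the SPI-ROM
	 * version select register. */
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
			MDIO_PMA_REG_TX_POWER_DOWN, &val);
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
			 MDIO_PMA_REG_TX_POWER_DOWN, val & ~(1 << 10));
	msleep(15);
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
			MDIO_PMA_REG_EDC_FFE_MAIN, &val);
	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
			 MDIO_PMA_REG_EDC_FFE_MAIN, val | (1 << 12));
}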
@@ -7846,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7846 7978
7847 /* Set fault module detected LED on */ 7979 /* Set fault module detected LED on */
7848 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 7980 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
7849 MISC_REGISTERS_GPIO_HIGH, 7981 MISC_REGISTERS_GPIO_HIGH,
7850 port); 7982 port);
7851 } 7983 }
7852 7984
7853 return 0; 7985 return 0;
7854} 7986}
7987static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
7988 u8 *io_gpio, u8 *io_port)
7989{
7990
7991 u32 phy_gpio_reset = REG_RD(bp, shmem_base +
7992 offsetof(struct shmem_region,
7993 dev_info.port_hw_config[PORT_0].default_cfg));
7994 switch (phy_gpio_reset) {
7995 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
7996 *io_gpio = 0;
7997 *io_port = 0;
7998 break;
7999 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
8000 *io_gpio = 1;
8001 *io_port = 0;
8002 break;
8003 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
8004 *io_gpio = 2;
8005 *io_port = 0;
8006 break;
8007 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
8008 *io_gpio = 3;
8009 *io_port = 0;
8010 break;
8011 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
8012 *io_gpio = 0;
8013 *io_port = 1;
8014 break;
8015 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
8016 *io_gpio = 1;
8017 *io_port = 1;
8018 break;
8019 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
8020 *io_gpio = 2;
8021 *io_port = 1;
8022 break;
8023 case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
8024 *io_gpio = 3;
8025 *io_port = 1;
8026 break;
8027 default:
8028 /* Don't override the io_gpio and io_port */
8029 break;
8030 }
8031}
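Note that the default branch above deliberately leaves *io_gpio and *io_port untouched, so callers must pre-load their fallbacks before the call, exactly as the 8727 common init below does with GPIO1 and port 1. A minimal usage sketch (types simplified; the caller in the hunk uses s8 variables and casts to u8 *):

	u8 reset_gpio = MISC_REGISTERS_GPIO_1;	/* fallback when shmem has no override */
	u8 port = 1;

	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], &reset_gpio, &port);
	/* reset_gpio/port now hold either the shmem selection or the defaults */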
7855static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, 8032static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7856 u32 shmem_base_path[], 8033 u32 shmem_base_path[],
7857 u32 shmem2_base_path[], u8 phy_index, 8034 u32 shmem2_base_path[], u8 phy_index,
7858 u32 chip_id) 8035 u32 chip_id)
7859{ 8036{
7860 s8 port; 8037 s8 port, reset_gpio;
7861 u32 swap_val, swap_override; 8038 u32 swap_val, swap_override;
7862 struct bnx2x_phy phy[PORT_MAX]; 8039 struct bnx2x_phy phy[PORT_MAX];
7863 struct bnx2x_phy *phy_blk[PORT_MAX]; 8040 struct bnx2x_phy *phy_blk[PORT_MAX];
7864 s8 port_of_path; 8041 s8 port_of_path;
7865 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 8042 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7866 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 8043 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7867 8044
8045 reset_gpio = MISC_REGISTERS_GPIO_1;
7868 port = 1; 8046 port = 1;
7869 8047
7870 bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override)); 8048 /*
8049 * Retrieve the reset gpio/port which control the reset.
8050 * Default is GPIO1, PORT1
8051 */
8052 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
8053 (u8 *)&reset_gpio, (u8 *)&port);
7871 8054
7872 /* Calculate the port based on port swap */ 8055 /* Calculate the port based on port swap */
7873 port ^= (swap_val && swap_override); 8056 port ^= (swap_val && swap_override);
7874 8057
8058 /* Initiate PHY reset*/
8059 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
8060 port);
8061 msleep(1);
8062 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
8063 port);
8064
7875 msleep(5); 8065 msleep(5);
7876 8066
7877 /* PART1 - Reset both phys */ 8067 /* PART1 - Reset both phys */
@@ -7907,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7907 8097
7908 /* Reset the phy */ 8098 /* Reset the phy */
7909 bnx2x_cl45_write(bp, &phy[port], 8099 bnx2x_cl45_write(bp, &phy[port],
7910 MDIO_PMA_DEVAD, 8100 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
7911 MDIO_PMA_REG_CTRL,
7912 1<<15);
7913 } 8101 }
7914 8102
7915 /* Add delay of 150ms after reset */ 8103 /* Add delay of 150ms after reset */
@@ -7923,7 +8111,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7923 } 8111 }
7924 /* PART2 - Download firmware to both phys */ 8112 /* PART2 - Download firmware to both phys */
7925 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 8113 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7926 if (CHIP_IS_E2(bp)) 8114 if (CHIP_IS_E2(bp))
7927 port_of_path = 0; 8115 port_of_path = 0;
7928 else 8116 else
7929 port_of_path = port; 8117 port_of_path = port;
@@ -7958,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7958 break; 8146 break;
7959 8147
7960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 8148 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7961 /* GPIO1 affects both ports, so there's need to pull 8149 /*
7962 it for single port alone */ 8150 * GPIO1 affects both ports, so there is a need to pull
8151 * it even when only a single port is used
8152 */
7963 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, 8153 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
7964 shmem2_base_path, 8154 shmem2_base_path,
7965 phy_index, chip_id); 8155 phy_index, chip_id);
@@ -7969,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7969 break; 8159 break;
7970 default: 8160 default:
7971 DP(NETIF_MSG_LINK, 8161 DP(NETIF_MSG_LINK,
7972 "bnx2x_common_init_phy: ext_phy 0x%x not required\n", 8162 "ext_phy 0x%x common init not required\n",
7973 ext_phy_type); 8163 ext_phy_type);
7974 break; 8164 break;
7975 } 8165 }
7976 8166
8167 if (rc != 0)
8168 netdev_err(bp->dev, "Warning: PHY was not initialized,"
8169 " Port %d\n",
8170 0);
7977 return rc; 8171 return rc;
7978} 8172}
7979 8173
@@ -7986,9 +8180,6 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7986 u32 ext_phy_type, ext_phy_config; 8180 u32 ext_phy_type, ext_phy_config;
7987 DP(NETIF_MSG_LINK, "Begin common phy init\n"); 8181 DP(NETIF_MSG_LINK, "Begin common phy init\n");
7988 8182
7989 if (CHIP_REV_IS_EMUL(bp))
7990 return 0;
7991
7992 /* Check if common init was already done */ 8183 /* Check if common init was already done */
7993 phy_ver = REG_RD(bp, shmem_base_path[0] + 8184 phy_ver = REG_RD(bp, shmem_base_path[0] +
7994 offsetof(struct shmem_region, 8185 offsetof(struct shmem_region,
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a942c..92f36b6950d 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
1/* Copyright 2008-2010 Broadcom Corporation 1/* Copyright 2008-2011 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH 33#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE 34#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
35 35
36#define SPEED_AUTO_NEG 0 36#define SPEED_AUTO_NEG 0
37#define SPEED_12000 12000 37#define SPEED_12000 12000
38#define SPEED_12500 12500 38#define SPEED_12500 12500
39#define SPEED_13000 13000 39#define SPEED_13000 13000
@@ -44,8 +44,8 @@
44#define SFP_EEPROM_VENDOR_NAME_SIZE 16 44#define SFP_EEPROM_VENDOR_NAME_SIZE 16
45#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 45#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
46#define SFP_EEPROM_VENDOR_OUI_SIZE 3 46#define SFP_EEPROM_VENDOR_OUI_SIZE 3
47#define SFP_EEPROM_PART_NO_ADDR 0x28 47#define SFP_EEPROM_PART_NO_ADDR 0x28
48#define SFP_EEPROM_PART_NO_SIZE 16 48#define SFP_EEPROM_PART_NO_SIZE 16
49#define PWR_FLT_ERR_MSG_LEN 250 49#define PWR_FLT_ERR_MSG_LEN 250
50 50
51#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ 51#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
62#define SINGLE_MEDIA(params) (params->num_phys == 2) 62#define SINGLE_MEDIA(params) (params->num_phys == 2)
63/* Dual Media board contains two external phy with different media */ 63/* Dual Media board contains two external phy with different media */
64#define DUAL_MEDIA(params) (params->num_phys == 3) 64#define DUAL_MEDIA(params) (params->num_phys == 3)
65#define FW_PARAM_MDIO_CTRL_OFFSET 16 65#define FW_PARAM_MDIO_CTRL_OFFSET 16
66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ 66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) 67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
68 68
@@ -201,12 +201,14 @@ struct link_params {
201 201
202 /* Default / User Configuration */ 202 /* Default / User Configuration */
203 u8 loopback_mode; 203 u8 loopback_mode;
204#define LOOPBACK_NONE 0 204#define LOOPBACK_NONE 0
205#define LOOPBACK_EMAC 1 205#define LOOPBACK_EMAC 1
206#define LOOPBACK_BMAC 2 206#define LOOPBACK_BMAC 2
207#define LOOPBACK_XGXS 3 207#define LOOPBACK_XGXS 3
208#define LOOPBACK_EXT_PHY 4 208#define LOOPBACK_EXT_PHY 4
209#define LOOPBACK_EXT 5 209#define LOOPBACK_EXT 5
210#define LOOPBACK_UMAC 6
211#define LOOPBACK_XMAC 7
210 212
211 /* Device parameters */ 213 /* Device parameters */
212 u8 mac_addr[6]; 214 u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
230 /* Phy register parameter */ 232 /* Phy register parameter */
231 u32 chip_id; 233 u32 chip_id;
232 234
235 /* features */
233 u32 feature_config_flags; 236 u32 feature_config_flags;
234#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) 237#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
235#define FEATURE_CONFIG_PFC_ENABLED (1<<1) 238#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
236#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 239#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
237#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) 240#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
238 /* Will be populated during common init */ 241 /* Will be populated during common init */
239 struct bnx2x_phy phy[MAX_PHYS]; 242 struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
334/* Reset the external of SFX7101 */ 337/* Reset the external of SFX7101 */
335void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); 338void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
336 339
340/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
341u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
342 struct link_params *params, u16 addr,
343 u8 byte_cnt, u8 *o_buf);
344
337void bnx2x_hw_reset_phy(struct link_params *params); 345void bnx2x_hw_reset_phy(struct link_params *params);
338 346
339/* Checks if HW lock is required for this phy/board type */ 347/* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
379 387
380/* Used to configure the ETS to BW limited */ 388/* Used to configure the ETS to BW limited */
381void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, 389void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
382 const u32 cos1_bw); 390 const u32 cos1_bw);
383 391
384/* Used to configure the ETS to strict */ 392/* Used to configure the ETS to strict */
385u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); 393u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 032ae184b60..30b21d2f26f 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 586 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
587 587
588 /* lock the dmae channel */ 588 /* lock the dmae channel */
589 mutex_lock(&bp->dmae_mutex); 589 spin_lock_bh(&bp->dmae_lock);
590 590
591 /* reset completion */ 591 /* reset completion */
592 *wb_comp = 0; 592 *wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 617 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
618 618
619unlock: 619unlock:
620 mutex_unlock(&bp->dmae_mutex); 620 spin_unlock_bh(&bp->dmae_lock);
621 return rc; 621 return rc;
622} 622}
623 623
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1397 } 1397 }
1398 1398
1399 smp_mb__before_atomic_inc(); 1399 smp_mb__before_atomic_inc();
1400 atomic_inc(&bp->spq_left); 1400 atomic_inc(&bp->cq_spq_left);
1401 /* push the change in fp->state and towards the memory */ 1401 /* push the change in fp->state and towards the memory */
1402 smp_wmb(); 1402 smp_wmb();
1403 1403
@@ -2483,8 +2483,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2483 rxq_init->sge_map = fp->rx_sge_mapping; 2483 rxq_init->sge_map = fp->rx_sge_mapping;
2484 rxq_init->rcq_map = fp->rx_comp_mapping; 2484 rxq_init->rcq_map = fp->rx_comp_mapping;
2485 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; 2485 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2486 rxq_init->mtu = bp->dev->mtu; 2486
2487 rxq_init->buf_sz = bp->rx_buf_size; 2487 /* Always use mini-jumbo MTU for FCoE L2 ring */
2488 if (IS_FCOE_FP(fp))
2489 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2490 else
2491 rxq_init->mtu = bp->dev->mtu;
2492
2493 rxq_init->buf_sz = fp->rx_buf_size;
2488 rxq_init->cl_qzone_id = fp->cl_qzone_id; 2494 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2489 rxq_init->cl_id = fp->cl_id; 2495 rxq_init->cl_id = fp->cl_id;
2490 rxq_init->spcl_id = fp->cl_id; 2496 rxq_init->spcl_id = fp->cl_id;
@@ -2736,11 +2742,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2736 2742
2737 spin_lock_bh(&bp->spq_lock); 2743 spin_lock_bh(&bp->spq_lock);
2738 2744
2739 if (!atomic_read(&bp->spq_left)) { 2745 if (common) {
2740 BNX2X_ERR("BUG! SPQ ring full!\n"); 2746 if (!atomic_read(&bp->eq_spq_left)) {
2741 spin_unlock_bh(&bp->spq_lock); 2747 BNX2X_ERR("BUG! EQ ring full!\n");
2742 bnx2x_panic(); 2748 spin_unlock_bh(&bp->spq_lock);
2743 return -EBUSY; 2749 bnx2x_panic();
2750 return -EBUSY;
2751 }
2752 } else if (!atomic_read(&bp->cq_spq_left)) {
2753 BNX2X_ERR("BUG! SPQ ring full!\n");
2754 spin_unlock_bh(&bp->spq_lock);
2755 bnx2x_panic();
2756 return -EBUSY;
2744 } 2757 }
2745 2758
2746 spe = bnx2x_sp_get_next(bp); 2759 spe = bnx2x_sp_get_next(bp);
@@ -2771,20 +2784,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2771 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 2784 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2772 2785
2773 /* stats ramrod has it's own slot on the spq */ 2786 /* stats ramrod has it's own slot on the spq */
2774 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) 2787 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2775 /* It's ok if the actual decrement is issued towards the memory 2788 /* It's ok if the actual decrement is issued towards the memory
2776 * somewhere between the spin_lock and spin_unlock. Thus no 2789 * somewhere between the spin_lock and spin_unlock. Thus no
2777 * more explicit memory barrier is needed. 2790 * more explicit memory barrier is needed.
2778 */ 2791 */
2779 atomic_dec(&bp->spq_left); 2792 if (common)
2793 atomic_dec(&bp->eq_spq_left);
2794 else
2795 atomic_dec(&bp->cq_spq_left);
2796 }
2797
2780 2798
2781 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2799 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2782 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) " 2800 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2783 "type(0x%x) left %x\n", 2801 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
2784 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 2802 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2785 (u32)(U64_LO(bp->spq_mapping) + 2803 (u32)(U64_LO(bp->spq_mapping) +
2786 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 2804 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2787 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left)); 2805 HW_CID(bp, cid), data_hi, data_lo, type,
2806 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
2788 2807
2789 bnx2x_sp_prod_update(bp); 2808 bnx2x_sp_prod_update(bp);
2790 spin_unlock_bh(&bp->spq_lock); 2809 spin_unlock_bh(&bp->spq_lock);
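The hunk above splits the old spq_left credit into two counters: cq_spq_left for ETH ramrods completed on the receive completion queue and eq_spq_left for COMMON ramrods completed on the event queue. A minimal userspace model of that bookkeeping (illustrative only; the driver does the check-then-decrement under spq_lock, and the pool size below is a placeholder):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SPQ_CREDITS 8   /* placeholder pool size, not the driver's value */

/* Two independent credit pools: ETH ramrods are completed on the RCQ,
 * COMMON ramrods on the event queue, so each gets its own counter. */
static atomic_int cq_spq_left = SPQ_CREDITS;
static atomic_int eq_spq_left = SPQ_CREDITS;

/* Post a slow-path element: take a credit from the matching pool and fail
 * when it is empty (the driver treats that as a fatal "ring full" bug). */
static int sp_post(bool common)
{
        atomic_int *left = common ? &eq_spq_left : &cq_spq_left;

        if (atomic_load(left) == 0)
                return -1;
        atomic_fetch_sub(left, 1);
        return 0;
}

/* Completion paths return credits to their own pool. */
static void rcq_completion(void)  { atomic_fetch_add(&cq_spq_left, 1); }
static void eq_completions(int n) { atomic_fetch_add(&eq_spq_left, n); }

int main(void)
{
        if (sp_post(false) == 0)        /* ETH ramrod */
                rcq_completion();
        if (sp_post(true) == 0)         /* COMMON ramrod */
                eq_completions(1);
        printf("cq %d, eq %d\n", atomic_load(&cq_spq_left),
               atomic_load(&eq_spq_left));
        return 0;
}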
@@ -3696,8 +3715,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3696 sw_cons = bp->eq_cons; 3715 sw_cons = bp->eq_cons;
3697 sw_prod = bp->eq_prod; 3716 sw_prod = bp->eq_prod;
3698 3717
3699 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n", 3718 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
3700 hw_cons, sw_cons, atomic_read(&bp->spq_left)); 3719 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
3701 3720
3702 for (; sw_cons != hw_cons; 3721 for (; sw_cons != hw_cons;
3703 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 3722 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3762,13 +3781,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3762 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): 3781 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3763 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): 3782 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3764 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 3783 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3765 bp->set_mac_pending = 0; 3784 if (elem->message.data.set_mac_event.echo)
3785 bp->set_mac_pending = 0;
3766 break; 3786 break;
3767 3787
3768 case (EVENT_RING_OPCODE_SET_MAC | 3788 case (EVENT_RING_OPCODE_SET_MAC |
3769 BNX2X_STATE_CLOSING_WAIT4_HALT): 3789 BNX2X_STATE_CLOSING_WAIT4_HALT):
3770 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 3790 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3771 bp->set_mac_pending = 0; 3791 if (elem->message.data.set_mac_event.echo)
3792 bp->set_mac_pending = 0;
3772 break; 3793 break;
3773 default: 3794 default:
3774 /* unknown event log error and continue */ 3795 /* unknown event log error and continue */
@@ -3780,7 +3801,7 @@ next_spqe:
3780 } /* for */ 3801 } /* for */
3781 3802
3782 smp_mb__before_atomic_inc(); 3803 smp_mb__before_atomic_inc();
3783 atomic_add(spqe_cnt, &bp->spq_left); 3804 atomic_add(spqe_cnt, &bp->eq_spq_left);
3784 3805
3785 bp->eq_cons = sw_cons; 3806 bp->eq_cons = sw_cons;
3786 bp->eq_prod = sw_prod; 3807 bp->eq_prod = sw_prod;
@@ -4213,7 +4234,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
4213static void bnx2x_init_sp_ring(struct bnx2x *bp) 4234static void bnx2x_init_sp_ring(struct bnx2x *bp)
4214{ 4235{
4215 spin_lock_init(&bp->spq_lock); 4236 spin_lock_init(&bp->spq_lock);
4216 atomic_set(&bp->spq_left, MAX_SPQ_PENDING); 4237 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
4217 4238
4218 bp->spq_prod_idx = 0; 4239 bp->spq_prod_idx = 0;
4219 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 4240 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4238,9 +4259,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
4238 bp->eq_cons = 0; 4259 bp->eq_cons = 0;
4239 bp->eq_prod = NUM_EQ_DESC; 4260 bp->eq_prod = NUM_EQ_DESC;
4240 bp->eq_cons_sb = BNX2X_EQ_INDEX; 4261 bp->eq_cons_sb = BNX2X_EQ_INDEX;
 4262 /* we want a warning message before it gets rough... */
4263 atomic_set(&bp->eq_spq_left,
4264 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
4241} 4265}
4242 4266
4243static void bnx2x_init_ind_table(struct bnx2x *bp) 4267void bnx2x_push_indir_table(struct bnx2x *bp)
4244{ 4268{
4245 int func = BP_FUNC(bp); 4269 int func = BP_FUNC(bp);
4246 int i; 4270 int i;
@@ -4248,13 +4272,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4248 if (bp->multi_mode == ETH_RSS_MODE_DISABLED) 4272 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4249 return; 4273 return;
4250 4274
4251 DP(NETIF_MSG_IFUP,
4252 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4253 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 4275 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4254 REG_WR8(bp, BAR_TSTRORM_INTMEM + 4276 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4255 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 4277 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4256 bp->fp->cl_id + (i % (bp->num_queues - 4278 bp->fp->cl_id + bp->rx_indir_table[i]);
4257 NONE_ETH_CONTEXT_USE))); 4279}
4280
4281static void bnx2x_init_ind_table(struct bnx2x *bp)
4282{
4283 int i;
4284
4285 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4286 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4287
4288 bnx2x_push_indir_table(bp);
4258} 4289}
4259 4290
4260void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4291void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
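bnx2x_init_ind_table() now fills a host-side rx_indir_table round-robin over the ETH queues, and bnx2x_push_indir_table() writes the base client id plus each table value into TSTORM internal memory, one byte per entry. A standalone sketch of that fill-and-push, with the table size, queue count and client id picked purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define INDIR_TABLE_SIZE 128    /* assumed table size for this sketch */

int main(void)
{
        uint8_t rx_indir_table[INDIR_TABLE_SIZE];
        unsigned int num_eth_queues = 4;        /* example queue count */
        unsigned int base_cl_id = 2;            /* example first client id */
        unsigned int i;

        /* Default mapping: spread table entries round-robin over the queues. */
        for (i = 0; i < INDIR_TABLE_SIZE; i++)
                rx_indir_table[i] = i % num_eth_queues;

        /* "Push": the driver writes base client id plus the table value,
         * one REG_WR8 per entry; here we just print the first few. */
        for (i = 0; i < 8; i++)
                printf("entry %u -> client %u\n", i,
                       base_cl_id + rx_indir_table[i]);
        return 0;
}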
@@ -5850,7 +5881,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5850 BP_ABS_FUNC(bp), load_code); 5881 BP_ABS_FUNC(bp), load_code);
5851 5882
5852 bp->dmae_ready = 0; 5883 bp->dmae_ready = 0;
5853 mutex_init(&bp->dmae_mutex); 5884 spin_lock_init(&bp->dmae_lock);
5854 rc = bnx2x_gunzip_init(bp); 5885 rc = bnx2x_gunzip_init(bp);
5855 if (rc) 5886 if (rc)
5856 return rc; 5887 return rc;
@@ -6002,6 +6033,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
6002 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 6033 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6003 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6034 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6004 6035
6036 BNX2X_FREE(bp->rx_indir_table);
6037
6005#undef BNX2X_PCI_FREE 6038#undef BNX2X_PCI_FREE
6006#undef BNX2X_KFREE 6039#undef BNX2X_KFREE
6007} 6040}
@@ -6132,6 +6165,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
6132 /* EQ */ 6165 /* EQ */
6133 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 6166 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6134 BCM_PAGE_SIZE * NUM_EQ_PAGES); 6167 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6168
6169 BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
6170 TSTORM_INDIRECTION_TABLE_SIZE);
6135 return 0; 6171 return 0;
6136 6172
6137alloc_mem_err: 6173alloc_mem_err:
@@ -6185,12 +6221,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6185 int ramrod_flags = WAIT_RAMROD_COMMON; 6221 int ramrod_flags = WAIT_RAMROD_COMMON;
6186 6222
6187 bp->set_mac_pending = 1; 6223 bp->set_mac_pending = 1;
6188 smp_wmb();
6189 6224
6190 config->hdr.length = 1; 6225 config->hdr.length = 1;
6191 config->hdr.offset = cam_offset; 6226 config->hdr.offset = cam_offset;
6192 config->hdr.client_id = 0xff; 6227 config->hdr.client_id = 0xff;
6193 config->hdr.reserved1 = 0; 6228 /* Mark the single MAC configuration ramrod (as opposed to a
6229 * UC/MC list configuration).
6230 */
6231 config->hdr.echo = 1;
6194 6232
6195 /* primary MAC */ 6233 /* primary MAC */
6196 config->config_table[0].msb_mac_addr = 6234 config->config_table[0].msb_mac_addr =
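The reserved1 field of the CAM command header is reused as hdr.echo in this hunk: ramrods posted with echo set take part in the set_mac_pending handshake, while list-style UC/MC ramrods post with echo clear and are not waited for (the event-queue handler changed earlier checks the same flag before clearing the pending bit). A toy model of that convention, not the driver code:

#include <stdbool.h>
#include <stdio.h>

/* Only ramrods posted with echo=1 participate in the pending handshake. */
static int set_mac_pending;

static void post_set_mac_ramrod(bool echo)
{
        if (echo)
                set_mac_pending = 1;    /* caller will poll this flag */
        /* ... build the CAM command with hdr.echo = echo and post it ... */
}

static void set_mac_completion(bool echo)
{
        /* Mirrors the EQ handler: clear the flag only for echo'd ramrods. */
        if (echo)
                set_mac_pending = 0;
}

int main(void)
{
        post_set_mac_ramrod(true);              /* single-MAC ramrod */
        set_mac_completion(true);
        printf("pending after echo'd ramrod: %d\n", set_mac_pending);

        post_set_mac_ramrod(false);             /* e.g. a UC/MC list update */
        set_mac_completion(false);
        printf("pending after list ramrod:   %d\n", set_mac_pending);
        return 0;
}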
@@ -6222,6 +6260,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6222 config->config_table[0].middle_mac_addr, 6260 config->config_table[0].middle_mac_addr,
6223 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); 6261 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
6224 6262
6263 mb();
6264
6225 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6265 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6226 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6266 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6227 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); 6267 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6286,20 +6326,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6286 if (CHIP_IS_E1H(bp)) 6326 if (CHIP_IS_E1H(bp))
6287 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); 6327 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6288 else if (CHIP_MODE_IS_4_PORT(bp)) 6328 else if (CHIP_MODE_IS_4_PORT(bp))
6289 return BP_FUNC(bp) * 32 + rel_offset; 6329 return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
6290 else 6330 else
6291 return BP_VN(bp) * 32 + rel_offset; 6331 return E2_FUNC_MAX * rel_offset + BP_VN(bp);
6292} 6332}
6293 6333
6294/** 6334/**
6295 * LLH CAM line allocations: currently only iSCSI and ETH macs are 6335 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6296 * relevant. In addition, current implementation is tuned for a 6336 * relevant. In addition, current implementation is tuned for a
6297 * single ETH MAC. 6337 * single ETH MAC.
6298 *
6299 * When multiple unicast ETH MACs PF configuration in switch
6300 * independent mode is required (NetQ, multiple netdev MACs,
6301 * etc.), consider better utilisation of 16 per function MAC
6302 * entries in the LLH memory.
6303 */ 6338 */
6304enum { 6339enum {
6305 LLH_CAM_ISCSI_ETH_LINE = 0, 6340 LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6374,14 +6409,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6374 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); 6409 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6375 } 6410 }
6376} 6411}
6377static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset) 6412
6413static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
6414{
6415 return CHIP_REV_IS_SLOW(bp) ?
6416 (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
6417 (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
6418}
6419
6420/* set mc list, do not wait as wait implies sleep and
6421 * set_rx_mode can be invoked from non-sleepable context.
6422 *
6423 * Instead we use the same ramrod data buffer each time we need
6424 * to configure a list of addresses, and use the fact that the
6425 * list of MACs is changed in an incremental way and that the
6426 * function is called under the netif_addr_lock. A temporary
6427 * inconsistent CAM configuration (possible in case of a very fast
6428 * sequence of add/del/add on the host side) will shortly be
6429 * restored by the handler of the last ramrod.
6430 */
6431static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
6378{ 6432{
6379 int i = 0, old; 6433 int i = 0, old;
6380 struct net_device *dev = bp->dev; 6434 struct net_device *dev = bp->dev;
6435 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6381 struct netdev_hw_addr *ha; 6436 struct netdev_hw_addr *ha;
6382 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6437 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6383 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6438 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6384 6439
6440 if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
6441 return -EINVAL;
6442
6385 netdev_for_each_mc_addr(ha, dev) { 6443 netdev_for_each_mc_addr(ha, dev) {
6386 /* copy mac */ 6444 /* copy mac */
6387 config_cmd->config_table[i].msb_mac_addr = 6445 config_cmd->config_table[i].msb_mac_addr =
@@ -6422,32 +6480,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6422 } 6480 }
6423 } 6481 }
6424 6482
6483 wmb();
6484
6425 config_cmd->hdr.length = i; 6485 config_cmd->hdr.length = i;
6426 config_cmd->hdr.offset = offset; 6486 config_cmd->hdr.offset = offset;
6427 config_cmd->hdr.client_id = 0xff; 6487 config_cmd->hdr.client_id = 0xff;
6428 config_cmd->hdr.reserved1 = 0; 6488 /* Mark that this ramrod doesn't use bp->set_mac_pending for
6489 * synchronization.
6490 */
6491 config_cmd->hdr.echo = 0;
6429 6492
6430 bp->set_mac_pending = 1; 6493 mb();
6431 smp_wmb();
6432 6494
6433 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6495 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6434 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6496 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6435} 6497}
6436static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp) 6498
6499void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
6437{ 6500{
6438 int i; 6501 int i;
6439 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); 6502 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6440 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); 6503 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6441 int ramrod_flags = WAIT_RAMROD_COMMON; 6504 int ramrod_flags = WAIT_RAMROD_COMMON;
6505 u8 offset = bnx2x_e1_cam_mc_offset(bp);
6442 6506
6443 bp->set_mac_pending = 1; 6507 for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
6444 smp_wmb();
6445
6446 for (i = 0; i < config_cmd->hdr.length; i++)
6447 SET_FLAG(config_cmd->config_table[i].flags, 6508 SET_FLAG(config_cmd->config_table[i].flags,
6448 MAC_CONFIGURATION_ENTRY_ACTION_TYPE, 6509 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6449 T_ETH_MAC_COMMAND_INVALIDATE); 6510 T_ETH_MAC_COMMAND_INVALIDATE);
6450 6511
6512 wmb();
6513
6514 config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
6515 config_cmd->hdr.offset = offset;
6516 config_cmd->hdr.client_id = 0xff;
6517 /* We'll wait for a completion this time... */
6518 config_cmd->hdr.echo = 1;
6519
6520 bp->set_mac_pending = 1;
6521
6522 mb();
6523
6451 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 6524 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6452 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); 6525 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6453 6526
@@ -6457,6 +6530,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6457 6530
6458} 6531}
6459 6532
6533/* Accept one or more multicasts */
6534static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
6535{
6536 struct net_device *dev = bp->dev;
6537 struct netdev_hw_addr *ha;
6538 u32 mc_filter[MC_HASH_SIZE];
6539 u32 crc, bit, regidx;
6540 int i;
6541
6542 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6543
6544 netdev_for_each_mc_addr(ha, dev) {
6545 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6546 bnx2x_mc_addr(ha));
6547
6548 crc = crc32c_le(0, bnx2x_mc_addr(ha),
6549 ETH_ALEN);
6550 bit = (crc >> 24) & 0xff;
6551 regidx = bit >> 5;
6552 bit &= 0x1f;
6553 mc_filter[regidx] |= (1 << bit);
6554 }
6555
6556 for (i = 0; i < MC_HASH_SIZE; i++)
6557 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6558 mc_filter[i]);
6559
6560 return 0;
6561}
6562
6563void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6564{
6565 int i;
6566
6567 for (i = 0; i < MC_HASH_SIZE; i++)
6568 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6569}
6570
6460#ifdef BCM_CNIC 6571#ifdef BCM_CNIC
6461/** 6572/**
6462 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH 6573 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
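bnx2x_set_e1h_mc_list() above programs a 256-bit hash filter: the top byte of the CRC-32C of each multicast MAC picks one of 256 buckets, which splits into a register index and a bit index across eight 32-bit MC_HASH registers. The sketch below reproduces only that bucket arithmetic; its CRC routine is a plain bitwise CRC-32C standing in for the kernel's crc32c_le(), so seed and finalization details may differ:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise reflected CRC-32C (Castagnoli), used here only to demonstrate the
 * bucket selection; it is not a drop-in for the kernel's crc32c_le(). */
static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
{
        size_t i;
        int k;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
        }
        return crc;
}

int main(void)
{
        unsigned mc_filter[8];                  /* MC_HASH_SIZE registers */
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
        uint32_t crc;
        unsigned bucket, regidx, bit;

        memset(mc_filter, 0, sizeof(mc_filter));

        crc = crc32c(0, mac, sizeof(mac));
        bucket = (crc >> 24) & 0xff;    /* one of 256 hash buckets */
        regidx = bucket >> 5;           /* which 32-bit register */
        bit = bucket & 0x1f;            /* which bit inside it */
        mc_filter[regidx] |= 1u << bit;

        printf("bucket %u -> mc_filter[%u] = 0x%08x\n",
               bucket, regidx, mc_filter[regidx]);
        return 0;
}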
@@ -6475,12 +6586,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6475 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + 6586 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6476 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 6587 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6477 u32 cl_bit_vec = (1 << iscsi_l2_cl_id); 6588 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6589 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
6478 6590
6479 /* Send a SET_MAC ramrod */ 6591 /* Send a SET_MAC ramrod */
6480 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, 6592 bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
6481 cam_offset, 0); 6593 cam_offset, 0);
6482 6594
6483 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); 6595 bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6484 6596
6485 return 0; 6597 return 0;
6486} 6598}
@@ -7122,20 +7234,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7122 /* Give HW time to discard old tx messages */ 7234 /* Give HW time to discard old tx messages */
7123 msleep(1); 7235 msleep(1);
7124 7236
7125 if (CHIP_IS_E1(bp)) { 7237 bnx2x_set_eth_mac(bp, 0);
7126 /* invalidate mc list,
7127 * wait and poll (interrupts are off)
7128 */
7129 bnx2x_invlidate_e1_mc_list(bp);
7130 bnx2x_set_eth_mac(bp, 0);
7131
7132 } else {
7133 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7134 7238
7135 bnx2x_set_eth_mac(bp, 0); 7239 bnx2x_invalidate_uc_list(bp);
7136 7240
7137 for (i = 0; i < MC_HASH_SIZE; i++) 7241 if (CHIP_IS_E1(bp))
7138 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 7242 bnx2x_invalidate_e1_mc_list(bp);
7243 else {
7244 bnx2x_invalidate_e1h_mc_list(bp);
7245 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7139 } 7246 }
7140 7247
7141#ifdef BCM_CNIC 7248#ifdef BCM_CNIC
@@ -8404,11 +8511,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8404 bp->common.shmem2_base); 8511 bp->common.shmem2_base);
8405} 8512}
8406 8513
8514#ifdef BCM_CNIC
8515static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8516{
8517 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8518 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8519 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8520 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8521
8522 /* Get the number of maximum allowed iSCSI and FCoE connections */
8523 bp->cnic_eth_dev.max_iscsi_conn =
8524 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8525 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8526
8527 bp->cnic_eth_dev.max_fcoe_conn =
8528 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8529 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8530
8531 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8532 bp->cnic_eth_dev.max_iscsi_conn,
8533 bp->cnic_eth_dev.max_fcoe_conn);
8534
 8535 /* If maximum allowed number of connections is zero -
8536 * disable the feature.
8537 */
8538 if (!bp->cnic_eth_dev.max_iscsi_conn)
8539 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8540
8541 if (!bp->cnic_eth_dev.max_fcoe_conn)
8542 bp->flags |= NO_FCOE_FLAG;
8543}
8544#endif
8545
8407static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 8546static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8408{ 8547{
8409 u32 val, val2; 8548 u32 val, val2;
8410 int func = BP_ABS_FUNC(bp); 8549 int func = BP_ABS_FUNC(bp);
8411 int port = BP_PORT(bp); 8550 int port = BP_PORT(bp);
8551#ifdef BCM_CNIC
8552 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8553 u8 *fip_mac = bp->fip_mac;
8554#endif
8412 8555
8413 if (BP_NOMCP(bp)) { 8556 if (BP_NOMCP(bp)) {
8414 BNX2X_ERROR("warning: random MAC workaround active\n"); 8557 BNX2X_ERROR("warning: random MAC workaround active\n");
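bnx2x_get_cnic_info() above XOR-decodes each shared-memory license word with FW_ENCODE_32BIT_PATTERN, then masks and shifts out the per-feature connection limit; a zero result is what sets the NO_ISCSI/NO_FCOE flags. The sketch below shows only that decode shape, and the pattern, mask and shift values in it are placeholders rather than the driver's real constants:

#include <stdint.h>
#include <stdio.h>

/* Placeholder constants: the real FW_ENCODE_32BIT_PATTERN and the
 * BNX2X_MAX_*_INIT_CONN_MASK/SHIFT values live in the driver headers and
 * are not reproduced here. */
#define FW_ENCODE_PATTERN       0x1e1e1e1eU
#define CONN_MASK               0x00ff0000U
#define CONN_SHIFT              16

static unsigned decode_max_conn(uint32_t shmem_word)
{
        uint32_t decoded = FW_ENCODE_PATTERN ^ shmem_word;

        return (decoded & CONN_MASK) >> CONN_SHIFT;
}

int main(void)
{
        /* Pretend SHMEM word encoding "32 connections" in the field. */
        uint32_t word = FW_ENCODE_PATTERN ^ (32u << CONN_SHIFT);
        unsigned max_conn = decode_max_conn(word);

        /* A zero result is what would disable the iSCSI/FCoE feature. */
        printf("max connections: %u%s\n", max_conn,
               max_conn ? "" : " (feature would be disabled)");
        return 0;
}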
@@ -8421,7 +8564,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8421 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 8564 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8422 8565
8423#ifdef BCM_CNIC 8566#ifdef BCM_CNIC
8424 /* iSCSI NPAR MAC */ 8567 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
 8568 * FCoE MAC is missing, the appropriate feature should be disabled.
8569 */
8425 if (IS_MF_SI(bp)) { 8570 if (IS_MF_SI(bp)) {
8426 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 8571 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8427 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 8572 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8429,8 +8574,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8429 iscsi_mac_addr_upper); 8574 iscsi_mac_addr_upper);
8430 val = MF_CFG_RD(bp, func_ext_config[func]. 8575 val = MF_CFG_RD(bp, func_ext_config[func].
8431 iscsi_mac_addr_lower); 8576 iscsi_mac_addr_lower);
8432 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8577 BNX2X_DEV_INFO("Read iSCSI MAC: "
8433 } 8578 "0x%x:0x%04x\n", val2, val);
8579 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8580
8581 /* Disable iSCSI OOO if MAC configuration is
8582 * invalid.
8583 */
8584 if (!is_valid_ether_addr(iscsi_mac)) {
8585 bp->flags |= NO_ISCSI_OOO_FLAG |
8586 NO_ISCSI_FLAG;
8587 memset(iscsi_mac, 0, ETH_ALEN);
8588 }
8589 } else
8590 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8591
8592 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8593 val2 = MF_CFG_RD(bp, func_ext_config[func].
8594 fcoe_mac_addr_upper);
8595 val = MF_CFG_RD(bp, func_ext_config[func].
8596 fcoe_mac_addr_lower);
 8597 BNX2X_DEV_INFO("Read FCoE MAC: "
8598 "0x%x:0x%04x\n", val2, val);
8599 bnx2x_set_mac_buf(fip_mac, val, val2);
8600
8601 /* Disable FCoE if MAC configuration is
8602 * invalid.
8603 */
8604 if (!is_valid_ether_addr(fip_mac)) {
8605 bp->flags |= NO_FCOE_FLAG;
8606 memset(bp->fip_mac, 0, ETH_ALEN);
8607 }
8608 } else
8609 bp->flags |= NO_FCOE_FLAG;
8434 } 8610 }
8435#endif 8611#endif
8436 } else { 8612 } else {
@@ -8444,7 +8620,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8444 iscsi_mac_upper); 8620 iscsi_mac_upper);
8445 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 8621 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8446 iscsi_mac_lower); 8622 iscsi_mac_lower);
8447 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8623 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8448#endif 8624#endif
8449 } 8625 }
8450 8626
@@ -8452,14 +8628,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8452 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8628 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8453 8629
8454#ifdef BCM_CNIC 8630#ifdef BCM_CNIC
8455 /* Inform the upper layers about FCoE MAC */ 8631 /* Set the FCoE MAC in modes other then MF_SI */
8456 if (!CHIP_IS_E1x(bp)) { 8632 if (!CHIP_IS_E1x(bp)) {
8457 if (IS_MF_SD(bp)) 8633 if (IS_MF_SD(bp))
8458 memcpy(bp->fip_mac, bp->dev->dev_addr, 8634 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8459 sizeof(bp->fip_mac)); 8635 else if (!IS_MF(bp))
8460 else 8636 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
8461 memcpy(bp->fip_mac, bp->iscsi_mac,
8462 sizeof(bp->fip_mac));
8463 } 8637 }
8464#endif 8638#endif
8465} 8639}
@@ -8622,6 +8796,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8622 /* Get MAC addresses */ 8796 /* Get MAC addresses */
8623 bnx2x_get_mac_hwinfo(bp); 8797 bnx2x_get_mac_hwinfo(bp);
8624 8798
8799#ifdef BCM_CNIC
8800 bnx2x_get_cnic_info(bp);
8801#endif
8802
8625 return rc; 8803 return rc;
8626} 8804}
8627 8805
@@ -8836,12 +9014,197 @@ static int bnx2x_close(struct net_device *dev)
8836 return 0; 9014 return 0;
8837} 9015}
8838 9016
9017#define E1_MAX_UC_LIST 29
9018#define E1H_MAX_UC_LIST 30
9019#define E2_MAX_UC_LIST 14
9020static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
9021{
9022 if (CHIP_IS_E1(bp))
9023 return E1_MAX_UC_LIST;
9024 else if (CHIP_IS_E1H(bp))
9025 return E1H_MAX_UC_LIST;
9026 else
9027 return E2_MAX_UC_LIST;
9028}
9029
9030
9031static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9032{
9033 if (CHIP_IS_E1(bp))
9034 /* CAM Entries for Port0:
9035 * 0 - prim ETH MAC
9036 * 1 - BCAST MAC
9037 * 2 - iSCSI L2 ring ETH MAC
9038 * 3-31 - UC MACs
9039 *
9040 * Port1 entries are allocated the same way starting from
9041 * entry 32.
9042 */
9043 return 3 + 32 * BP_PORT(bp);
9044 else if (CHIP_IS_E1H(bp)) {
9045 /* CAM Entries:
9046 * 0-7 - prim ETH MAC for each function
9047 * 8-15 - iSCSI L2 ring ETH MAC for each function
9048 * 16 till 255 UC MAC lists for each function
9049 *
9050 * Remark: There is no FCoE support for E1H, thus FCoE related
9051 * MACs are not considered.
9052 */
9053 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9054 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9055 } else {
9056 /* CAM Entries (there is a separate CAM per engine):
 9057 * 0-3 - prim ETH MAC for each function
9058 * 4-7 - iSCSI L2 ring ETH MAC for each function
9059 * 8-11 - FIP ucast L2 MAC for each function
9060 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
9061 * 16 till 71 UC MAC lists for each function
9062 */
9063 u8 func_idx =
9064 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9065
9066 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9067 bnx2x_max_uc_list(bp) * func_idx;
9068 }
9069}
9070
9071/* set uc list, do not wait as wait implies sleep and
9072 * set_rx_mode can be invoked from non-sleepable context.
9073 *
9074 * Instead we use the same ramrod data buffer each time we need
9075 * to configure a list of addresses, and use the fact that the
9076 * list of MACs is changed in an incremental way and that the
9077 * function is called under the netif_addr_lock. A temporary
9078 * inconsistent CAM configuration (possible in case of very fast
9079 * sequence of add/del/add on the host side) will shortly be
9080 * restored by the handler of the last ramrod.
9081 */
9082static int bnx2x_set_uc_list(struct bnx2x *bp)
9083{
9084 int i = 0, old;
9085 struct net_device *dev = bp->dev;
9086 u8 offset = bnx2x_uc_list_cam_offset(bp);
9087 struct netdev_hw_addr *ha;
9088 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9089 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9090
9091 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9092 return -EINVAL;
9093
9094 netdev_for_each_uc_addr(ha, dev) {
9095 /* copy mac */
9096 config_cmd->config_table[i].msb_mac_addr =
9097 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9098 config_cmd->config_table[i].middle_mac_addr =
9099 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9100 config_cmd->config_table[i].lsb_mac_addr =
9101 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9102
9103 config_cmd->config_table[i].vlan_id = 0;
9104 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9105 config_cmd->config_table[i].clients_bit_vector =
9106 cpu_to_le32(1 << BP_L_ID(bp));
9107
9108 SET_FLAG(config_cmd->config_table[i].flags,
9109 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9110 T_ETH_MAC_COMMAND_SET);
9111
9112 DP(NETIF_MSG_IFUP,
9113 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9114 config_cmd->config_table[i].msb_mac_addr,
9115 config_cmd->config_table[i].middle_mac_addr,
9116 config_cmd->config_table[i].lsb_mac_addr);
9117
9118 i++;
9119
9120 /* Set uc MAC in NIG */
9121 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9122 LLH_CAM_ETH_LINE + i);
9123 }
9124 old = config_cmd->hdr.length;
9125 if (old > i) {
9126 for (; i < old; i++) {
9127 if (CAM_IS_INVALID(config_cmd->
9128 config_table[i])) {
9129 /* already invalidated */
9130 break;
9131 }
9132 /* invalidate */
9133 SET_FLAG(config_cmd->config_table[i].flags,
9134 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9135 T_ETH_MAC_COMMAND_INVALIDATE);
9136 }
9137 }
9138
9139 wmb();
9140
9141 config_cmd->hdr.length = i;
9142 config_cmd->hdr.offset = offset;
9143 config_cmd->hdr.client_id = 0xff;
9144 /* Mark that this ramrod doesn't use bp->set_mac_pending for
9145 * synchronization.
9146 */
9147 config_cmd->hdr.echo = 0;
9148
9149 mb();
9150
9151 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9152 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9153
9154}
9155
9156void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9157{
9158 int i;
9159 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9160 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9161 int ramrod_flags = WAIT_RAMROD_COMMON;
9162 u8 offset = bnx2x_uc_list_cam_offset(bp);
9163 u8 max_list_size = bnx2x_max_uc_list(bp);
9164
9165 for (i = 0; i < max_list_size; i++) {
9166 SET_FLAG(config_cmd->config_table[i].flags,
9167 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9168 T_ETH_MAC_COMMAND_INVALIDATE);
9169 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9170 }
9171
9172 wmb();
9173
9174 config_cmd->hdr.length = max_list_size;
9175 config_cmd->hdr.offset = offset;
9176 config_cmd->hdr.client_id = 0xff;
9177 /* We'll wait for a completion this time... */
9178 config_cmd->hdr.echo = 1;
9179
9180 bp->set_mac_pending = 1;
9181
9182 mb();
9183
9184 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9185 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9186
9187 /* Wait for a completion */
9188 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9189 ramrod_flags);
9190
9191}
9192
9193static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9194{
9195 /* some multicasts */
9196 if (CHIP_IS_E1(bp)) {
9197 return bnx2x_set_e1_mc_list(bp);
9198 } else { /* E1H and newer */
9199 return bnx2x_set_e1h_mc_list(bp);
9200 }
9201}
9202
8839/* called with netif_tx_lock from dev_mcast.c */ 9203/* called with netif_tx_lock from dev_mcast.c */
8840void bnx2x_set_rx_mode(struct net_device *dev) 9204void bnx2x_set_rx_mode(struct net_device *dev)
8841{ 9205{
8842 struct bnx2x *bp = netdev_priv(dev); 9206 struct bnx2x *bp = netdev_priv(dev);
8843 u32 rx_mode = BNX2X_RX_MODE_NORMAL; 9207 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8844 int port = BP_PORT(bp);
8845 9208
8846 if (bp->state != BNX2X_STATE_OPEN) { 9209 if (bp->state != BNX2X_STATE_OPEN) {
8847 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 9210 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
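bnx2x_uc_list_cam_offset() above places each function's unicast window after the fixed per-function CAM entries. Evaluating just the E1H branch with the values its comment implies (E1H_FUNC_MAX of 8, CAM_ISCSI_ETH_LINE of 1 and the 30-entry UC list, all treated here as assumptions) gives windows starting at entries 16, 46, 76 and so on:

#include <stdio.h>

/* Values implied by the CAM layout comment above; treat them as sketch
 * assumptions, not authoritative driver constants. */
#define E1H_FUNC_MAX            8
#define CAM_ISCSI_ETH_LINE      1
#define E1H_MAX_UC_LIST         30

int main(void)
{
        int func;

        /* Entries 0-7: primary ETH MACs, 8-15: iSCSI MACs, 16+: UC windows. */
        for (func = 0; func < E1H_FUNC_MAX; func++) {
                int offset = E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
                             E1H_MAX_UC_LIST * func;

                printf("func %d: UC CAM window starts at entry %d\n",
                       func, offset);
        }
        return 0;
}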
@@ -8852,47 +9215,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
8852 9215
8853 if (dev->flags & IFF_PROMISC) 9216 if (dev->flags & IFF_PROMISC)
8854 rx_mode = BNX2X_RX_MODE_PROMISC; 9217 rx_mode = BNX2X_RX_MODE_PROMISC;
8855 else if ((dev->flags & IFF_ALLMULTI) || 9218 else if (dev->flags & IFF_ALLMULTI)
8856 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8857 CHIP_IS_E1(bp)))
8858 rx_mode = BNX2X_RX_MODE_ALLMULTI; 9219 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8859 else { /* some multicasts */ 9220 else {
8860 if (CHIP_IS_E1(bp)) { 9221 /* some multicasts */
8861 /* 9222 if (bnx2x_set_mc_list(bp))
8862 * set mc list, do not wait as wait implies sleep 9223 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8863 * and set_rx_mode can be invoked from non-sleepable
8864 * context
8865 */
8866 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8867 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8868 BNX2X_MAX_MULTICAST*(1 + port));
8869
8870 bnx2x_set_e1_mc_list(bp, offset);
8871 } else { /* E1H */
8872 /* Accept one or more multicasts */
8873 struct netdev_hw_addr *ha;
8874 u32 mc_filter[MC_HASH_SIZE];
8875 u32 crc, bit, regidx;
8876 int i;
8877
8878 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8879
8880 netdev_for_each_mc_addr(ha, dev) {
8881 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
8882 bnx2x_mc_addr(ha));
8883
8884 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8885 ETH_ALEN);
8886 bit = (crc >> 24) & 0xff;
8887 regidx = bit >> 5;
8888 bit &= 0x1f;
8889 mc_filter[regidx] |= (1 << bit);
8890 }
8891 9224
8892 for (i = 0; i < MC_HASH_SIZE; i++) 9225 /* some unicasts */
8893 REG_WR(bp, MC_HASH_OFFSET(bp, i), 9226 if (bnx2x_set_uc_list(bp))
8894 mc_filter[i]); 9227 rx_mode = BNX2X_RX_MODE_PROMISC;
8895 }
8896 } 9228 }
8897 9229
8898 bp->rx_mode = rx_mode; 9230 bp->rx_mode = rx_mode;
@@ -8973,7 +9305,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
8973 .ndo_stop = bnx2x_close, 9305 .ndo_stop = bnx2x_close,
8974 .ndo_start_xmit = bnx2x_start_xmit, 9306 .ndo_start_xmit = bnx2x_start_xmit,
8975 .ndo_select_queue = bnx2x_select_queue, 9307 .ndo_select_queue = bnx2x_select_queue,
8976 .ndo_set_multicast_list = bnx2x_set_rx_mode, 9308 .ndo_set_rx_mode = bnx2x_set_rx_mode,
8977 .ndo_set_mac_address = bnx2x_change_mac_addr, 9309 .ndo_set_mac_address = bnx2x_change_mac_addr,
8978 .ndo_validate_addr = eth_validate_addr, 9310 .ndo_validate_addr = eth_validate_addr,
8979 .ndo_do_ioctl = bnx2x_ioctl, 9311 .ndo_do_ioctl = bnx2x_ioctl,
@@ -9119,7 +9451,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9119 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 9451 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9120 dev->vlan_features |= NETIF_F_TSO6; 9452 dev->vlan_features |= NETIF_F_TSO6;
9121 9453
9122#ifdef BCM_DCB 9454#ifdef BCM_DCBNL
9123 dev->dcbnl_ops = &bnx2x_dcbnl_ops; 9455 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9124#endif 9456#endif
9125 9457
@@ -9526,6 +9858,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9526 } 9858 }
9527#endif 9859#endif
9528 9860
9861#ifdef BCM_DCBNL
9862 /* Delete app tlvs from dcbnl */
9863 bnx2x_dcbnl_update_applist(bp, true);
9864#endif
9865
9529 unregister_netdev(dev); 9866 unregister_netdev(dev);
9530 9867
9531 /* Delete all NAPI objects */ 9868 /* Delete all NAPI objects */
@@ -9799,15 +10136,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9799 HW_CID(bp, BNX2X_ISCSI_ETH_CID)); 10136 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9800 } 10137 }
9801 10138
9802 /* There may be not more than 8 L2 and COMMON SPEs and not more 10139 /* There may be not more than 8 L2 and not more than 8 L5 SPEs
9803 * than 8 L5 SPEs in the air. 10140 * We also check that the number of outstanding
10141 * COMMON ramrods is not more than the EQ and SPQ can
10142 * accommodate.
9804 */ 10143 */
9805 if ((type == NONE_CONNECTION_TYPE) || 10144 if (type == ETH_CONNECTION_TYPE) {
9806 (type == ETH_CONNECTION_TYPE)) { 10145 if (!atomic_read(&bp->cq_spq_left))
9807 if (!atomic_read(&bp->spq_left))
9808 break; 10146 break;
9809 else 10147 else
9810 atomic_dec(&bp->spq_left); 10148 atomic_dec(&bp->cq_spq_left);
10149 } else if (type == NONE_CONNECTION_TYPE) {
10150 if (!atomic_read(&bp->eq_spq_left))
10151 break;
10152 else
10153 atomic_dec(&bp->eq_spq_left);
9811 } else if ((type == ISCSI_CONNECTION_TYPE) || 10154 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9812 (type == FCOE_CONNECTION_TYPE)) { 10155 (type == FCOE_CONNECTION_TYPE)) {
9813 if (bp->cnic_spq_pending >= 10156 if (bp->cnic_spq_pending >=
@@ -9885,7 +10228,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9885 int rc = 0; 10228 int rc = 0;
9886 10229
9887 mutex_lock(&bp->cnic_mutex); 10230 mutex_lock(&bp->cnic_mutex);
9888 c_ops = bp->cnic_ops; 10231 c_ops = rcu_dereference_protected(bp->cnic_ops,
10232 lockdep_is_held(&bp->cnic_mutex));
9889 if (c_ops) 10233 if (c_ops)
9890 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 10234 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9891 mutex_unlock(&bp->cnic_mutex); 10235 mutex_unlock(&bp->cnic_mutex);
@@ -9999,7 +10343,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9999 int count = ctl->data.credit.credit_count; 10343 int count = ctl->data.credit.credit_count;
10000 10344
10001 smp_mb__before_atomic_inc(); 10345 smp_mb__before_atomic_inc();
10002 atomic_add(count, &bp->spq_left); 10346 atomic_add(count, &bp->cq_spq_left);
10003 smp_mb__after_atomic_inc(); 10347 smp_mb__after_atomic_inc();
10004 break; 10348 break;
10005 } 10349 }
@@ -10095,6 +10439,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10095 struct bnx2x *bp = netdev_priv(dev); 10439 struct bnx2x *bp = netdev_priv(dev);
10096 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 10440 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10097 10441
10442 /* If both iSCSI and FCoE are disabled - return NULL in
10443 * order to indicate CNIC that it should not try to work
10444 * with this device.
10445 */
10446 if (NO_ISCSI(bp) && NO_FCOE(bp))
10447 return NULL;
10448
10098 cp->drv_owner = THIS_MODULE; 10449 cp->drv_owner = THIS_MODULE;
10099 cp->chip_id = CHIP_ID(bp); 10450 cp->chip_id = CHIP_ID(bp);
10100 cp->pdev = bp->pdev; 10451 cp->pdev = bp->pdev;
@@ -10115,6 +10466,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10115 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; 10466 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
10116 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; 10467 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
10117 10468
10469 if (NO_ISCSI_OOO(bp))
10470 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
10471
10472 if (NO_ISCSI(bp))
10473 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
10474
10475 if (NO_FCOE(bp))
10476 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
10477
10118 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " 10478 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10119 "starting cid %d\n", 10479 "starting cid %d\n",
10120 cp->ctx_blk_size, 10480 cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index e01330bb36c..1c89f19a442 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 6083#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e 6084#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842 6085#define MDIO_PMA_REG_8727_PCS_GP 0xc842
6086#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
6086 6087
6087#define MDIO_AN_REG_8727_MISC_CTRL 0x8309 6088#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
6088 6089
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 5c6fba802f2..9bc5de3e04a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -604,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
604 604
605 _lock_rx_hashtbl(bond); 605 _lock_rx_hashtbl(bond);
606 606
607 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src)); 607 hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
608 client_info = &(bond_info->rx_hashtbl[hash_index]); 608 client_info = &(bond_info->rx_hashtbl[hash_index]);
609 609
610 if (client_info->assigned) { 610 if (client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 163e0b06eaa..0592e6da15a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,7 +59,6 @@
59#include <linux/uaccess.h> 59#include <linux/uaccess.h>
60#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/netdevice.h> 61#include <linux/netdevice.h>
62#include <linux/netpoll.h>
63#include <linux/inetdevice.h> 62#include <linux/inetdevice.h>
64#include <linux/igmp.h> 63#include <linux/igmp.h>
65#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
@@ -424,15 +423,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
424{ 423{
425 skb->dev = slave_dev; 424 skb->dev = slave_dev;
426 skb->priority = 1; 425 skb->priority = 1;
427#ifdef CONFIG_NET_POLL_CONTROLLER 426 if (unlikely(netpoll_tx_running(slave_dev)))
428 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { 427 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
429 struct netpoll *np = bond->dev->npinfo->netpoll; 428 else
430 slave_dev->npinfo = bond->dev->npinfo;
431 slave_dev->priv_flags |= IFF_IN_NETPOLL;
432 netpoll_send_skb_on_dev(np, skb, slave_dev);
433 slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
434 } else
435#endif
436 dev_queue_xmit(skb); 429 dev_queue_xmit(skb);
437 430
438 return 0; 431 return 0;
@@ -1288,63 +1281,113 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1288} 1281}
1289 1282
1290#ifdef CONFIG_NET_POLL_CONTROLLER 1283#ifdef CONFIG_NET_POLL_CONTROLLER
1291/* 1284static inline int slave_enable_netpoll(struct slave *slave)
1292 * You must hold read lock on bond->lock before calling this.
1293 */
1294static bool slaves_support_netpoll(struct net_device *bond_dev)
1295{ 1285{
1296 struct bonding *bond = netdev_priv(bond_dev); 1286 struct netpoll *np;
1297 struct slave *slave; 1287 int err = 0;
1298 int i = 0;
1299 bool ret = true;
1300 1288
1301 bond_for_each_slave(bond, slave, i) { 1289 np = kzalloc(sizeof(*np), GFP_KERNEL);
1302 if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) || 1290 err = -ENOMEM;
1303 !slave->dev->netdev_ops->ndo_poll_controller) 1291 if (!np)
1304 ret = false; 1292 goto out;
1293
1294 np->dev = slave->dev;
1295 err = __netpoll_setup(np);
1296 if (err) {
1297 kfree(np);
1298 goto out;
1305 } 1299 }
1306 return i != 0 && ret; 1300 slave->np = np;
1301out:
1302 return err;
1303}
1304static inline void slave_disable_netpoll(struct slave *slave)
1305{
1306 struct netpoll *np = slave->np;
1307
1308 if (!np)
1309 return;
1310
1311 slave->np = NULL;
1312 synchronize_rcu_bh();
1313 __netpoll_cleanup(np);
1314 kfree(np);
1315}
1316static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
1317{
1318 if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
1319 return false;
1320 if (!slave_dev->netdev_ops->ndo_poll_controller)
1321 return false;
1322 return true;
1307} 1323}
1308 1324
1309static void bond_poll_controller(struct net_device *bond_dev) 1325static void bond_poll_controller(struct net_device *bond_dev)
1310{ 1326{
1311 struct bonding *bond = netdev_priv(bond_dev); 1327}
1328
1329static void __bond_netpoll_cleanup(struct bonding *bond)
1330{
1312 struct slave *slave; 1331 struct slave *slave;
1313 int i; 1332 int i;
1314 1333
1315 bond_for_each_slave(bond, slave, i) { 1334 bond_for_each_slave(bond, slave, i)
1316 if (slave->dev && IS_UP(slave->dev)) 1335 if (IS_UP(slave->dev))
1317 netpoll_poll_dev(slave->dev); 1336 slave_disable_netpoll(slave);
1318 }
1319} 1337}
1320
1321static void bond_netpoll_cleanup(struct net_device *bond_dev) 1338static void bond_netpoll_cleanup(struct net_device *bond_dev)
1322{ 1339{
1323 struct bonding *bond = netdev_priv(bond_dev); 1340 struct bonding *bond = netdev_priv(bond_dev);
1341
1342 read_lock(&bond->lock);
1343 __bond_netpoll_cleanup(bond);
1344 read_unlock(&bond->lock);
1345}
1346
1347static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1348{
1349 struct bonding *bond = netdev_priv(dev);
1324 struct slave *slave; 1350 struct slave *slave;
1325 const struct net_device_ops *ops; 1351 int i, err = 0;
1326 int i;
1327 1352
1328 read_lock(&bond->lock); 1353 read_lock(&bond->lock);
1329 bond_dev->npinfo = NULL;
1330 bond_for_each_slave(bond, slave, i) { 1354 bond_for_each_slave(bond, slave, i) {
1331 if (slave->dev) { 1355 if (!IS_UP(slave->dev))
1332 ops = slave->dev->netdev_ops; 1356 continue;
1333 if (ops->ndo_netpoll_cleanup) 1357 err = slave_enable_netpoll(slave);
1334 ops->ndo_netpoll_cleanup(slave->dev); 1358 if (err) {
1335 else 1359 __bond_netpoll_cleanup(bond);
1336 slave->dev->npinfo = NULL; 1360 break;
1337 } 1361 }
1338 } 1362 }
1339 read_unlock(&bond->lock); 1363 read_unlock(&bond->lock);
1364 return err;
1340} 1365}
1341 1366
1342#else 1367static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
1368{
1369 return bond->dev->npinfo;
1370}
1343 1371
1372#else
1373static inline int slave_enable_netpoll(struct slave *slave)
1374{
1375 return 0;
1376}
1377static inline void slave_disable_netpoll(struct slave *slave)
1378{
1379}
1344static void bond_netpoll_cleanup(struct net_device *bond_dev) 1380static void bond_netpoll_cleanup(struct net_device *bond_dev)
1345{ 1381{
1346} 1382}
1347 1383static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1384{
1385 return 0;
1386}
1387static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
1388{
1389 return NULL;
1390}
1348#endif 1391#endif
1349 1392
1350/*---------------------------------- IOCTL ----------------------------------*/ 1393/*---------------------------------- IOCTL ----------------------------------*/
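The bonding netpoll rework above keeps netpoll state in a per-slave np pointer: enable allocates and configures it before publishing, disable unpublishes first, waits for in-flight users (synchronize_rcu_bh() in the driver) and only then cleans up and frees. A userspace model with stubbed netpoll calls, kept only to show that ordering:

#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for the kernel netpoll API; only the ordering matters. */
struct netpoll { int configured; };
struct slave { struct netpoll *np; };

static int netpoll_setup_stub(struct netpoll *np)    { np->configured = 1; return 0; }
static void netpoll_cleanup_stub(struct netpoll *np) { np->configured = 0; }
static void wait_for_readers_stub(void)              { /* kernel: synchronize_rcu_bh() */ }

static int slave_enable_netpoll(struct slave *slave)
{
        struct netpoll *np = calloc(1, sizeof(*np));

        if (!np)
                return -1;
        if (netpoll_setup_stub(np)) {
                free(np);
                return -1;
        }
        slave->np = np;                 /* publish only after setup succeeds */
        return 0;
}

static void slave_disable_netpoll(struct slave *slave)
{
        struct netpoll *np = slave->np;

        if (!np)
                return;
        slave->np = NULL;               /* unpublish first, */
        wait_for_readers_stub();        /* let in-flight users drain, */
        netpoll_cleanup_stub(np);       /* then tear down */
        free(np);
}

int main(void)
{
        struct slave s = { 0 };

        if (slave_enable_netpoll(&s) == 0)
                slave_disable_netpoll(&s);
        printf("np after disable: %p\n", (void *)s.np);
        return 0;
}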
@@ -1372,8 +1415,8 @@ static int bond_compute_features(struct bonding *bond)
1372{ 1415{
1373 struct slave *slave; 1416 struct slave *slave;
1374 struct net_device *bond_dev = bond->dev; 1417 struct net_device *bond_dev = bond->dev;
1375 unsigned long features = bond_dev->features; 1418 u32 features = bond_dev->features;
1376 unsigned long vlan_features = 0; 1419 u32 vlan_features = 0;
1377 unsigned short max_hard_header_len = max((u16)ETH_HLEN, 1420 unsigned short max_hard_header_len = max((u16)ETH_HLEN,
1378 bond_dev->hard_header_len); 1421 bond_dev->hard_header_len);
1379 int i; 1422 int i;
@@ -1400,8 +1443,8 @@ static int bond_compute_features(struct bonding *bond)
1400 1443
1401done: 1444done:
1402 features |= (bond_dev->features & BOND_VLAN_FEATURES); 1445 features |= (bond_dev->features & BOND_VLAN_FEATURES);
1403 bond_dev->features = netdev_fix_features(features, NULL); 1446 bond_dev->features = netdev_fix_features(bond_dev, features);
1404 bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); 1447 bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
1405 bond_dev->hard_header_len = max_hard_header_len; 1448 bond_dev->hard_header_len = max_hard_header_len;
1406 1449
1407 return 0; 1450 return 0;
@@ -1423,6 +1466,67 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
1423 bond->setup_by_slave = 1; 1466 bond->setup_by_slave = 1;
1424} 1467}
1425 1468
1469/* On bonding slaves other than the currently active slave, suppress
1470 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
1471 * ARP on active-backup slaves with arp_validate enabled.
1472 */
1473static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1474 struct net_device *slave_dev,
1475 struct net_device *bond_dev)
1476{
1477 if (slave_dev->priv_flags & IFF_SLAVE_INACTIVE) {
1478 if (slave_dev->priv_flags & IFF_SLAVE_NEEDARP &&
1479 skb->protocol == __cpu_to_be16(ETH_P_ARP))
1480 return false;
1481
1482 if (bond_dev->priv_flags & IFF_MASTER_ALB &&
1483 skb->pkt_type != PACKET_BROADCAST &&
1484 skb->pkt_type != PACKET_MULTICAST)
1485 return false;
1486
1487 if (bond_dev->priv_flags & IFF_MASTER_8023AD &&
1488 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
1489 return false;
1490
1491 return true;
1492 }
1493 return false;
1494}
1495
1496static struct sk_buff *bond_handle_frame(struct sk_buff *skb)
1497{
1498 struct net_device *slave_dev;
1499 struct net_device *bond_dev;
1500
1501 skb = skb_share_check(skb, GFP_ATOMIC);
1502 if (unlikely(!skb))
1503 return NULL;
1504 slave_dev = skb->dev;
1505 bond_dev = ACCESS_ONCE(slave_dev->master);
1506 if (unlikely(!bond_dev))
1507 return skb;
1508
1509 if (bond_dev->priv_flags & IFF_MASTER_ARPMON)
1510 slave_dev->last_rx = jiffies;
1511
1512 if (bond_should_deliver_exact_match(skb, slave_dev, bond_dev)) {
1513 skb->deliver_no_wcard = 1;
1514 return skb;
1515 }
1516
1517 skb->dev = bond_dev;
1518
1519 if (bond_dev->priv_flags & IFF_MASTER_ALB &&
1520 bond_dev->priv_flags & IFF_BRIDGE_PORT &&
1521 skb->pkt_type == PACKET_HOST) {
1522 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
1523
1524 memcpy(dest, bond_dev->dev_addr, ETH_ALEN);
1525 }
1526
1527 return skb;
1528}
1529
1426/* enslave device <slave> to bond device <master> */ 1530/* enslave device <slave> to bond device <master> */
1427int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1531int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1428{ 1532{
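bond_should_deliver_exact_match() above decides which frames arriving on an inactive slave stay off the bond: everything is held back except 802.3ad LACPDUs, unicast traffic on ALB bonds, and ARP when arp_validate needs to see it. A plain-flag restatement of that predicate; the field names are illustrative, not the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct frame {
        bool slave_inactive;    /* IFF_SLAVE_INACTIVE */
        bool slave_needs_arp;   /* IFF_SLAVE_NEEDARP (arp_validate) */
        bool bond_is_alb;       /* IFF_MASTER_ALB */
        bool bond_is_8023ad;    /* IFF_MASTER_8023AD */
        bool is_arp;
        bool is_lacpdu;         /* ETH_P_SLOW */
        bool is_mcast_or_bcast;
};

/* true: keep the frame for exact-match listeners only (suppress it from the
 * bond); false: let it flow to the bond device as usual. */
static bool deliver_exact_match(const struct frame *f)
{
        if (!f->slave_inactive)
                return false;   /* active slave: normal path */
        if (f->slave_needs_arp && f->is_arp)
                return false;   /* arp_validate must see ARPs */
        if (f->bond_is_alb && !f->is_mcast_or_bcast)
                return false;   /* ALB expects unicast on its slaves */
        if (f->bond_is_8023ad && f->is_lacpdu)
                return false;   /* LACPDUs feed the 802.3ad state machine */
        return true;
}

int main(void)
{
        struct frame lacpdu = { .slave_inactive = true, .bond_is_8023ad = true,
                                .is_lacpdu = true };
        struct frame bcast  = { .slave_inactive = true, .bond_is_8023ad = true,
                                .is_mcast_or_bcast = true };

        printf("LACPDU suppressed?    %s\n",
               deliver_exact_match(&lacpdu) ? "yes" : "no");    /* no */
        printf("broadcast suppressed? %s\n",
               deliver_exact_match(&bcast) ? "yes" : "no");     /* yes */
        return 0;
}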
@@ -1594,16 +1698,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1594 } 1698 }
1595 } 1699 }
1596 1700
1597 res = netdev_set_master(slave_dev, bond_dev); 1701 res = netdev_set_bond_master(slave_dev, bond_dev);
1598 if (res) { 1702 if (res) {
1599 pr_debug("Error %d calling netdev_set_master\n", res); 1703 pr_debug("Error %d calling netdev_set_bond_master\n", res);
1600 goto err_restore_mac; 1704 goto err_restore_mac;
1601 } 1705 }
1706 res = netdev_rx_handler_register(slave_dev, bond_handle_frame, NULL);
1707 if (res) {
1708 pr_debug("Error %d calling netdev_rx_handler_register\n", res);
1709 goto err_unset_master;
1710 }
1711
1602 /* open the slave since the application closed it */ 1712 /* open the slave since the application closed it */
1603 res = dev_open(slave_dev); 1713 res = dev_open(slave_dev);
1604 if (res) { 1714 if (res) {
1605 pr_debug("Opening slave %s failed\n", slave_dev->name); 1715 pr_debug("Opening slave %s failed\n", slave_dev->name);
1606 goto err_unset_master; 1716 goto err_unreg_rxhandler;
1607 } 1717 }
1608 1718
1609 new_slave->dev = slave_dev; 1719 new_slave->dev = slave_dev;
@@ -1782,17 +1892,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1782 bond_set_carrier(bond); 1892 bond_set_carrier(bond);
1783 1893
1784#ifdef CONFIG_NET_POLL_CONTROLLER 1894#ifdef CONFIG_NET_POLL_CONTROLLER
1785 if (slaves_support_netpoll(bond_dev)) { 1895 slave_dev->npinfo = bond_netpoll_info(bond);
1786 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; 1896 if (slave_dev->npinfo) {
1787 if (bond_dev->npinfo) 1897 if (slave_enable_netpoll(new_slave)) {
1788 slave_dev->npinfo = bond_dev->npinfo; 1898 read_unlock(&bond->lock);
1789 } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { 1899 pr_info("Error, %s: master_dev is using netpoll, "
1790 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1900 "but new slave device does not support netpoll.\n",
1791 pr_info("New slave device %s does not support netpoll\n", 1901 bond_dev->name);
1792 slave_dev->name); 1902 res = -EBUSY;
1793 pr_info("Disabling netpoll support for %s\n", bond_dev->name); 1903 goto err_close;
1904 }
1794 } 1905 }
1795#endif 1906#endif
1907
1796 read_unlock(&bond->lock); 1908 read_unlock(&bond->lock);
1797 1909
1798 res = bond_create_slave_symlinks(bond_dev, slave_dev); 1910 res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1811,8 +1923,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1811err_close: 1923err_close:
1812 dev_close(slave_dev); 1924 dev_close(slave_dev);
1813 1925
1926err_unreg_rxhandler:
1927 netdev_rx_handler_unregister(slave_dev);
1928
1814err_unset_master: 1929err_unset_master:
1815 netdev_set_master(slave_dev, NULL); 1930 netdev_set_bond_master(slave_dev, NULL);
1816 1931
1817err_restore_mac: 1932err_restore_mac:
1818 if (!bond->params.fail_over_mac) { 1933 if (!bond->params.fail_over_mac) {
@@ -1992,19 +2107,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1992 netif_addr_unlock_bh(bond_dev); 2107 netif_addr_unlock_bh(bond_dev);
1993 } 2108 }
1994 2109
1995 netdev_set_master(slave_dev, NULL); 2110 netdev_rx_handler_unregister(slave_dev);
2111 netdev_set_bond_master(slave_dev, NULL);
1996 2112
1997#ifdef CONFIG_NET_POLL_CONTROLLER 2113 slave_disable_netpoll(slave);
1998 read_lock_bh(&bond->lock);
1999
2000 if (slaves_support_netpoll(bond_dev))
2001 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
2002 read_unlock_bh(&bond->lock);
2003 if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
2004 slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
2005 else
2006 slave_dev->npinfo = NULL;
2007#endif
2008 2114
2009 /* close slave before restoring its mac address */ 2115 /* close slave before restoring its mac address */
2010 dev_close(slave_dev); 2116 dev_close(slave_dev);
@@ -2039,6 +2145,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
2039 2145
2040 ret = bond_release(bond_dev, slave_dev); 2146 ret = bond_release(bond_dev, slave_dev);
2041 if ((ret == 0) && (bond->slave_cnt == 0)) { 2147 if ((ret == 0) && (bond->slave_cnt == 0)) {
2148 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2042 pr_info("%s: destroying bond %s.\n", 2149 pr_info("%s: destroying bond %s.\n",
2043 bond_dev->name, bond_dev->name); 2150 bond_dev->name, bond_dev->name);
2044 unregister_netdevice(bond_dev); 2151 unregister_netdevice(bond_dev);
@@ -2114,7 +2221,10 @@ static int bond_release_all(struct net_device *bond_dev)
2114 netif_addr_unlock_bh(bond_dev); 2221 netif_addr_unlock_bh(bond_dev);
2115 } 2222 }
2116 2223
2117 netdev_set_master(slave_dev, NULL); 2224 netdev_rx_handler_unregister(slave_dev);
2225 netdev_set_bond_master(slave_dev, NULL);
2226
2227 slave_disable_netpoll(slave);
2118 2228
2119 /* close slave before restoring its mac address */ 2229 /* close slave before restoring its mac address */
2120 dev_close(slave_dev); 2230 dev_close(slave_dev);
@@ -2571,7 +2681,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2571 2681
2572static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2682static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2573{ 2683{
2574 int i, vlan_id, rv; 2684 int i, vlan_id;
2575 __be32 *targets = bond->params.arp_targets; 2685 __be32 *targets = bond->params.arp_targets;
2576 struct vlan_entry *vlan; 2686 struct vlan_entry *vlan;
2577 struct net_device *vlan_dev; 2687 struct net_device *vlan_dev;
@@ -2598,8 +2708,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2598 fl.fl4_dst = targets[i]; 2708 fl.fl4_dst = targets[i];
2599 fl.fl4_tos = RTO_ONLINK; 2709 fl.fl4_tos = RTO_ONLINK;
2600 2710
2601 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl); 2711 rt = ip_route_output_key(dev_net(bond->dev), &fl);
2602 if (rv) { 2712 if (IS_ERR(rt)) {
2603 if (net_ratelimit()) { 2713 if (net_ratelimit()) {
2604 pr_warning("%s: no route to arp_ip_target %pI4\n", 2714 pr_warning("%s: no route to arp_ip_target %pI4\n",
2605 bond->dev->name, &fl.fl4_dst); 2715 bond->dev->name, &fl.fl4_dst);
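The conversion above reflects the new ip_route_output_key() calling convention: the route (or an encoded error) is now returned directly instead of being passed back through a pointer argument, and IS_ERR()/PTR_ERR() decode the result. As a rough user-space sketch of that error-pointer convention, with simplified stand-in helpers rather than the real kernel ones:

#include <stdio.h>

/* simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct rtable { int dummy; };

/* hypothetical lookup: fails with -101 (ENETUNREACH on Linux) for odd keys */
static struct rtable *lookup_route(int key)
{
	static struct rtable rt;

	if (key & 1)
		return ERR_PTR(-101);
	return &rt;
}

int main(void)
{
	struct rtable *rt = lookup_route(3);

	if (IS_ERR(rt))
		printf("no route, error %ld\n", PTR_ERR(rt));
	else
		printf("route found\n");
	return 0;
}

The same pattern appears again below in the cnic_get_v4_route() hunk.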
@@ -4654,9 +4764,12 @@ static const struct net_device_ops bond_netdev_ops = {
4654 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4764 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4655 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4765 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4656#ifdef CONFIG_NET_POLL_CONTROLLER 4766#ifdef CONFIG_NET_POLL_CONTROLLER
4767 .ndo_netpoll_setup = bond_netpoll_setup,
4657 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4768 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
4658 .ndo_poll_controller = bond_poll_controller, 4769 .ndo_poll_controller = bond_poll_controller,
4659#endif 4770#endif
4771 .ndo_add_slave = bond_enslave,
4772 .ndo_del_slave = bond_release,
4660}; 4773};
4661 4774
4662static void bond_destructor(struct net_device *bond_dev) 4775static void bond_destructor(struct net_device *bond_dev)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174c538..72bb0f6cc9b 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d,
1198 bond->dev->name, new_value); 1198 bond->dev->name, new_value);
1199 } 1199 }
1200out: 1200out:
1201 return count; 1201 return ret;
1202} 1202}
1203static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, 1203static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
1204 bonding_show_carrier, bonding_store_carrier); 1204 bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
1595 } 1595 }
1596 } 1596 }
1597out: 1597out:
1598 return count; 1598 return ret;
1599} 1599}
1600static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 1600static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1601 bonding_show_slaves_active, bonding_store_slaves_active); 1601 bonding_show_slaves_active, bonding_store_slaves_active);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 31fe980e4e2..ff4e2698022 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -20,6 +20,7 @@
20#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/cpumask.h> 21#include <linux/cpumask.h>
22#include <linux/in6.h> 22#include <linux/in6.h>
23#include <linux/netpoll.h>
23#include "bond_3ad.h" 24#include "bond_3ad.h"
24#include "bond_alb.h" 25#include "bond_alb.h"
25 26
@@ -132,7 +133,7 @@ static inline void unblock_netpoll_tx(void)
132 133
133static inline int is_netpoll_tx_blocked(struct net_device *dev) 134static inline int is_netpoll_tx_blocked(struct net_device *dev)
134{ 135{
135 if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) 136 if (unlikely(netpoll_tx_running(dev)))
136 return atomic_read(&netpoll_block_tx); 137 return atomic_read(&netpoll_block_tx);
137 return 0; 138 return 0;
138} 139}
@@ -198,6 +199,9 @@ struct slave {
198 u16 queue_id; 199 u16 queue_id;
199 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ 200 struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
200 struct tlb_slave_info tlb_info; 201 struct tlb_slave_info tlb_info;
202#ifdef CONFIG_NET_POLL_CONTROLLER
203 struct netpoll *np;
204#endif
201}; 205};
202 206
203/* 207/*
@@ -265,7 +269,8 @@ struct bonding {
265 * 269 *
266 * Caller must hold bond lock for read 270 * Caller must hold bond lock for read
267 */ 271 */
268static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) 272static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
273 struct net_device *slave_dev)
269{ 274{
270 struct slave *slave = NULL; 275 struct slave *slave = NULL;
271 int i; 276 int i;
@@ -276,7 +281,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
276 } 281 }
277 } 282 }
278 283
279 return 0; 284 return NULL;
280} 285}
281 286
282static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 287static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -323,6 +328,22 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
323 return slave->dev->last_rx; 328 return slave->dev->last_rx;
324} 329}
325 330
331#ifdef CONFIG_NET_POLL_CONTROLLER
332static inline void bond_netpoll_send_skb(const struct slave *slave,
333 struct sk_buff *skb)
334{
335 struct netpoll *np = slave->np;
336
337 if (np)
338 netpoll_send_skb(np, skb);
339}
340#else
341static inline void bond_netpoll_send_skb(const struct slave *slave,
342 struct sk_buff *skb)
343{
344}
345#endif
346
326static inline void bond_set_slave_inactive_flags(struct slave *slave) 347static inline void bond_set_slave_inactive_flags(struct slave *slave)
327{ 348{
328 struct bonding *bond = netdev_priv(slave->dev->master); 349 struct bonding *bond = netdev_priv(slave->dev->master);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 5dec456fd4a..1d699e3df54 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -115,6 +115,8 @@ source "drivers/net/can/mscan/Kconfig"
115 115
116source "drivers/net/can/sja1000/Kconfig" 116source "drivers/net/can/sja1000/Kconfig"
117 117
118source "drivers/net/can/c_can/Kconfig"
119
118source "drivers/net/can/usb/Kconfig" 120source "drivers/net/can/usb/Kconfig"
119 121
120source "drivers/net/can/softing/Kconfig" 122source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 53c82a71778..24ebfe8d758 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -13,6 +13,7 @@ obj-y += softing/
13 13
14obj-$(CONFIG_CAN_SJA1000) += sja1000/ 14obj-$(CONFIG_CAN_SJA1000) += sja1000/
15obj-$(CONFIG_CAN_MSCAN) += mscan/ 15obj-$(CONFIG_CAN_MSCAN) += mscan/
16obj-$(CONFIG_CAN_C_CAN) += c_can/
16obj-$(CONFIG_CAN_AT91) += at91_can.o 17obj-$(CONFIG_CAN_AT91) += at91_can.o
17obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 18obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
18obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 19obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 00000000000..ffb9773d102
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
1menuconfig CAN_C_CAN
2 tristate "Bosch C_CAN devices"
3 depends on CAN_DEV && HAS_IOMEM
4
5if CAN_C_CAN
6
7config CAN_C_CAN_PLATFORM
8 tristate "Generic Platform Bus based C_CAN driver"
9 ---help---
10 This driver adds support for the C_CAN chips connected to
11 the "platform bus" (Linux abstraction for directly to the
12 processor attached devices) which can be found on various
13 boards from ST Microelectronics (http://www.st.com)
14 like the SPEAr1310 and SPEAr320 evaluation boards.
15endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 00000000000..9273f6d5c4b
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Bosch C_CAN controller drivers.
3#
4
5obj-$(CONFIG_CAN_C_CAN) += c_can.o
6obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
7
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 00000000000..14050786218
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1158 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
13 * written by:
14 * Copyright
15 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
16 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
17 *
 18 * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
19 * Bosch C_CAN user manual can be obtained from:
20 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
21 * users_manual_c_can.pdf
22 *
23 * This file is licensed under the terms of the GNU General Public
24 * License version 2. This program is licensed "as is" without any
25 * warranty of any kind, whether express or implied.
26 */
27
28#include <linux/kernel.h>
29#include <linux/version.h>
30#include <linux/module.h>
31#include <linux/interrupt.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/if_arp.h>
35#include <linux/if_ether.h>
36#include <linux/list.h>
37#include <linux/delay.h>
38#include <linux/io.h>
39
40#include <linux/can.h>
41#include <linux/can/dev.h>
42#include <linux/can/error.h>
43
44#include "c_can.h"
45
46/* control register */
47#define CONTROL_TEST BIT(7)
48#define CONTROL_CCE BIT(6)
49#define CONTROL_DISABLE_AR BIT(5)
50#define CONTROL_ENABLE_AR (0 << 5)
51#define CONTROL_EIE BIT(3)
52#define CONTROL_SIE BIT(2)
53#define CONTROL_IE BIT(1)
54#define CONTROL_INIT BIT(0)
55
56/* test register */
57#define TEST_RX BIT(7)
58#define TEST_TX1 BIT(6)
59#define TEST_TX2 BIT(5)
60#define TEST_LBACK BIT(4)
61#define TEST_SILENT BIT(3)
62#define TEST_BASIC BIT(2)
63
64/* status register */
65#define STATUS_BOFF BIT(7)
66#define STATUS_EWARN BIT(6)
67#define STATUS_EPASS BIT(5)
68#define STATUS_RXOK BIT(4)
69#define STATUS_TXOK BIT(3)
70
71/* error counter register */
72#define ERR_CNT_TEC_MASK 0xff
73#define ERR_CNT_TEC_SHIFT 0
74#define ERR_CNT_REC_SHIFT 8
75#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
76#define ERR_CNT_RP_SHIFT 15
77#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
78
79/* bit-timing register */
80#define BTR_BRP_MASK 0x3f
81#define BTR_BRP_SHIFT 0
82#define BTR_SJW_SHIFT 6
83#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
84#define BTR_TSEG1_SHIFT 8
85#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
86#define BTR_TSEG2_SHIFT 12
87#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
88
89/* brp extension register */
90#define BRP_EXT_BRPE_MASK 0x0f
91#define BRP_EXT_BRPE_SHIFT 0
92
93/* IFx command request */
94#define IF_COMR_BUSY BIT(15)
95
96/* IFx command mask */
97#define IF_COMM_WR BIT(7)
98#define IF_COMM_MASK BIT(6)
99#define IF_COMM_ARB BIT(5)
100#define IF_COMM_CONTROL BIT(4)
101#define IF_COMM_CLR_INT_PND BIT(3)
102#define IF_COMM_TXRQST BIT(2)
103#define IF_COMM_DATAA BIT(1)
104#define IF_COMM_DATAB BIT(0)
105#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
106 IF_COMM_CONTROL | IF_COMM_TXRQST | \
107 IF_COMM_DATAA | IF_COMM_DATAB)
108
109/* IFx arbitration */
110#define IF_ARB_MSGVAL BIT(15)
111#define IF_ARB_MSGXTD BIT(14)
112#define IF_ARB_TRANSMIT BIT(13)
113
114/* IFx message control */
115#define IF_MCONT_NEWDAT BIT(15)
116#define IF_MCONT_MSGLST BIT(14)
117#define IF_MCONT_CLR_MSGLST (0 << 14)
118#define IF_MCONT_INTPND BIT(13)
119#define IF_MCONT_UMASK BIT(12)
120#define IF_MCONT_TXIE BIT(11)
121#define IF_MCONT_RXIE BIT(10)
122#define IF_MCONT_RMTEN BIT(9)
123#define IF_MCONT_TXRQST BIT(8)
124#define IF_MCONT_EOB BIT(7)
125#define IF_MCONT_DLC_MASK 0xf
126
127/*
128 * IFx register masks:
129 * allow easy operation on 16-bit registers when the
130 * argument is 32-bit instead
131 */
132#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
133#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
134
135/* message object split */
136#define C_CAN_NO_OF_OBJECTS 32
137#define C_CAN_MSG_OBJ_RX_NUM 16
138#define C_CAN_MSG_OBJ_TX_NUM 16
139
140#define C_CAN_MSG_OBJ_RX_FIRST 1
141#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
142 C_CAN_MSG_OBJ_RX_NUM - 1)
143
144#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
145#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
146 C_CAN_MSG_OBJ_TX_NUM - 1)
147
148#define C_CAN_MSG_OBJ_RX_SPLIT 9
149#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
150
151#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
152#define RECEIVE_OBJECT_BITS 0x0000ffff
153
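As a quick sanity check of the split defined above, a small stand-alone sketch (not part of the driver) that expands these constants and prints the resulting partition of the 32 message objects:

#include <stdio.h>

#define C_CAN_NO_OF_OBJECTS	32
#define C_CAN_MSG_OBJ_RX_NUM	16
#define C_CAN_MSG_OBJ_TX_NUM	16

#define C_CAN_MSG_OBJ_RX_FIRST	1
#define C_CAN_MSG_OBJ_RX_LAST	(C_CAN_MSG_OBJ_RX_FIRST + C_CAN_MSG_OBJ_RX_NUM - 1)

#define C_CAN_MSG_OBJ_TX_FIRST	(C_CAN_MSG_OBJ_RX_LAST + 1)
#define C_CAN_MSG_OBJ_TX_LAST	(C_CAN_MSG_OBJ_TX_FIRST + C_CAN_MSG_OBJ_TX_NUM - 1)

#define C_CAN_MSG_OBJ_RX_SPLIT	9
#define C_CAN_MSG_RX_LOW_LAST	(C_CAN_MSG_OBJ_RX_SPLIT - 1)

int main(void)
{
	/* the RX FIFO uses objects 1..16, split into a low group 1..8 and a high group 9..16 */
	printf("rx objects: %d..%d (low group ends at %d)\n",
	       C_CAN_MSG_OBJ_RX_FIRST, C_CAN_MSG_OBJ_RX_LAST, C_CAN_MSG_RX_LOW_LAST);
	/* TX uses the remaining objects 17..32 */
	printf("tx objects: %d..%d of %d total\n",
	       C_CAN_MSG_OBJ_TX_FIRST, C_CAN_MSG_OBJ_TX_LAST, C_CAN_NO_OF_OBJECTS);
	return 0;
}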
154/* status interrupt */
155#define STATUS_INTERRUPT 0x8000
156
157/* global interrupt masks */
158#define ENABLE_ALL_INTERRUPTS 1
159#define DISABLE_ALL_INTERRUPTS 0
160
161/* minimum timeout for checking BUSY status */
162#define MIN_TIMEOUT_VALUE 6
163
164/* napi related */
165#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
166
167/* c_can lec values */
168enum c_can_lec_type {
169 LEC_NO_ERROR = 0,
170 LEC_STUFF_ERROR,
171 LEC_FORM_ERROR,
172 LEC_ACK_ERROR,
173 LEC_BIT1_ERROR,
174 LEC_BIT0_ERROR,
175 LEC_CRC_ERROR,
176 LEC_UNUSED,
177};
178
179/*
180 * c_can error types:
181 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
182 */
183enum c_can_bus_error_types {
184 C_CAN_NO_ERROR = 0,
185 C_CAN_BUS_OFF,
186 C_CAN_ERROR_WARNING,
187 C_CAN_ERROR_PASSIVE,
188};
189
190static struct can_bittiming_const c_can_bittiming_const = {
191 .name = KBUILD_MODNAME,
192 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
193 .tseg1_max = 16,
194 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
195 .tseg2_max = 8,
196 .sjw_max = 4,
197 .brp_min = 1,
198 .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/
199 .brp_inc = 1,
200};
201
202static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
203{
204 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
205 C_CAN_MSG_OBJ_TX_FIRST;
206}
207
208static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
209{
210 return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
211 C_CAN_MSG_OBJ_TX_FIRST;
212}
213
214static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
215{
216 u32 val = priv->read_reg(priv, reg);
217 val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
218 return val;
219}
220
221static void c_can_enable_all_interrupts(struct c_can_priv *priv,
222 int enable)
223{
224 unsigned int cntrl_save = priv->read_reg(priv,
225 &priv->regs->control);
226
227 if (enable)
228 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
229 else
230 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
231
232 priv->write_reg(priv, &priv->regs->control, cntrl_save);
233}
234
235static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
236{
237 int count = MIN_TIMEOUT_VALUE;
238
239 while (count && priv->read_reg(priv,
240 &priv->regs->ifregs[iface].com_req) &
241 IF_COMR_BUSY) {
242 count--;
243 udelay(1);
244 }
245
246 if (!count)
247 return 1;
248
249 return 0;
250}
251
252static inline void c_can_object_get(struct net_device *dev,
253 int iface, int objno, int mask)
254{
255 struct c_can_priv *priv = netdev_priv(dev);
256
257 /*
 258	 * As per the spec, after writing the message object number into the
 259	 * IF command request register, the transfer between the interface
 260	 * register set and the message RAM must complete within 6 CAN-CLK
 261	 * periods.
262 */
263 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
264 IFX_WRITE_LOW_16BIT(mask));
265 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
266 IFX_WRITE_LOW_16BIT(objno));
267
268 if (c_can_msg_obj_is_busy(priv, iface))
269 netdev_err(dev, "timed out in object get\n");
270}
271
272static inline void c_can_object_put(struct net_device *dev,
273 int iface, int objno, int mask)
274{
275 struct c_can_priv *priv = netdev_priv(dev);
276
277 /*
 278	 * As per the spec, after writing the message object number into the
 279	 * IF command request register, the transfer between the interface
 280	 * register set and the message RAM must complete within 6 CAN-CLK
 281	 * periods.
282 */
283 priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
284 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
285 priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
286 IFX_WRITE_LOW_16BIT(objno));
287
288 if (c_can_msg_obj_is_busy(priv, iface))
289 netdev_err(dev, "timed out in object put\n");
290}
291
292static void c_can_write_msg_object(struct net_device *dev,
293 int iface, struct can_frame *frame, int objno)
294{
295 int i;
296 u16 flags = 0;
297 unsigned int id;
298 struct c_can_priv *priv = netdev_priv(dev);
299
300 if (!(frame->can_id & CAN_RTR_FLAG))
301 flags |= IF_ARB_TRANSMIT;
302
303 if (frame->can_id & CAN_EFF_FLAG) {
304 id = frame->can_id & CAN_EFF_MASK;
305 flags |= IF_ARB_MSGXTD;
306 } else
307 id = ((frame->can_id & CAN_SFF_MASK) << 18);
308
309 flags |= IF_ARB_MSGVAL;
310
311 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
312 IFX_WRITE_LOW_16BIT(id));
313 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
314 IFX_WRITE_HIGH_16BIT(id));
315
316 for (i = 0; i < frame->can_dlc; i += 2) {
317 priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
318 frame->data[i] | (frame->data[i + 1] << 8));
319 }
320
321 /* enable interrupt for this message object */
322 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
323 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
324 frame->can_dlc);
325 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
326}
327
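To illustrate the arbitration layout written by c_can_write_msg_object(), here is a small stand-alone sketch: a standard 11-bit id is shifted up by 18 so it occupies the top of the 29-bit arbitration field, an extended id is used as-is, and the IFX_WRITE_* helpers then split the result across the two 16-bit arb registers. The sample ids are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define CAN_SFF_MASK	0x000007FFU	/* standard frame format: 11 bits */
#define CAN_EFF_MASK	0x1FFFFFFFU	/* extended frame format: 29 bits */

#define IFX_WRITE_LOW_16BIT(x)	((x) & 0xFFFF)
#define IFX_WRITE_HIGH_16BIT(x)	(((x) & 0xFFFF0000) >> 16)

static void show(const char *tag, uint32_t id)
{
	printf("%s: arb1=0x%04x arb2(id bits)=0x%04x\n", tag,
	       (unsigned int)IFX_WRITE_LOW_16BIT(id),
	       (unsigned int)IFX_WRITE_HIGH_16BIT(id));
}

int main(void)
{
	uint32_t sff = 0x123 & CAN_SFF_MASK;		/* arbitrary standard id */
	uint32_t eff = 0x1abcdef0 & CAN_EFF_MASK;	/* arbitrary extended id */

	show("standard", sff << 18);	/* 11-bit id lands in the upper bits */
	show("extended", eff);		/* 29-bit id fills the field directly */
	return 0;
}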
328static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
329 int iface, int ctrl_mask,
330 int obj)
331{
332 struct c_can_priv *priv = netdev_priv(dev);
333
334 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
335 ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
336 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
337
338}
339
340static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
341 int iface,
342 int ctrl_mask)
343{
344 int i;
345 struct c_can_priv *priv = netdev_priv(dev);
346
347 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
348 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
349 ctrl_mask & ~(IF_MCONT_MSGLST |
350 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
351 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
352 }
353}
354
355static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
356 int iface, int ctrl_mask,
357 int obj)
358{
359 struct c_can_priv *priv = netdev_priv(dev);
360
361 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
362 ctrl_mask & ~(IF_MCONT_MSGLST |
363 IF_MCONT_INTPND | IF_MCONT_NEWDAT));
364 c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
365}
366
367static void c_can_handle_lost_msg_obj(struct net_device *dev,
368 int iface, int objno)
369{
370 struct c_can_priv *priv = netdev_priv(dev);
371 struct net_device_stats *stats = &dev->stats;
372 struct sk_buff *skb;
373 struct can_frame *frame;
374
375 netdev_err(dev, "msg lost in buffer %d\n", objno);
376
377 c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
378
379 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
380 IF_MCONT_CLR_MSGLST);
381
 382	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
383
384 /* create an error msg */
385 skb = alloc_can_err_skb(dev, &frame);
386 if (unlikely(!skb))
387 return;
388
389 frame->can_id |= CAN_ERR_CRTL;
390 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
391 stats->rx_errors++;
392 stats->rx_over_errors++;
393
394 netif_receive_skb(skb);
395}
396
397static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
398{
399 u16 flags, data;
400 int i;
401 unsigned int val;
402 struct c_can_priv *priv = netdev_priv(dev);
403 struct net_device_stats *stats = &dev->stats;
404 struct sk_buff *skb;
405 struct can_frame *frame;
406
407 skb = alloc_can_skb(dev, &frame);
408 if (!skb) {
409 stats->rx_dropped++;
410 return -ENOMEM;
411 }
412
413 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
414
415 flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
416 val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
417 (flags << 16);
418
419 if (flags & IF_ARB_MSGXTD)
420 frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
421 else
422 frame->can_id = (val >> 18) & CAN_SFF_MASK;
423
424 if (flags & IF_ARB_TRANSMIT)
425 frame->can_id |= CAN_RTR_FLAG;
426 else {
427 for (i = 0; i < frame->can_dlc; i += 2) {
428 data = priv->read_reg(priv,
429 &priv->regs->ifregs[iface].data[i / 2]);
430 frame->data[i] = data;
431 frame->data[i + 1] = data >> 8;
432 }
433 }
434
435 netif_receive_skb(skb);
436
437 stats->rx_packets++;
438 stats->rx_bytes += frame->can_dlc;
439
440 return 0;
441}
442
443static void c_can_setup_receive_object(struct net_device *dev, int iface,
444 int objno, unsigned int mask,
445 unsigned int id, unsigned int mcont)
446{
447 struct c_can_priv *priv = netdev_priv(dev);
448
449 priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
450 IFX_WRITE_LOW_16BIT(mask));
451 priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
452 IFX_WRITE_HIGH_16BIT(mask));
453
454 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
455 IFX_WRITE_LOW_16BIT(id));
456 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
457 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
458
459 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
460 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
461
462 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
463 c_can_read_reg32(priv, &priv->regs->msgval1));
464}
465
466static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
467{
468 struct c_can_priv *priv = netdev_priv(dev);
469
470 priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
471 priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
472 priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
473
474 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
475
476 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
477 c_can_read_reg32(priv, &priv->regs->msgval1));
478}
479
480static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
481{
482 int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
483
484 /*
 485	 * Bit n-1 of the transmission request register corresponds to
 486	 * message object n, so adjust the bit index accordingly.
487 */
488 if (val & (1 << (objno - 1)))
489 return 1;
490
491 return 0;
492}
493
494static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
495 struct net_device *dev)
496{
497 u32 msg_obj_no;
498 struct c_can_priv *priv = netdev_priv(dev);
499 struct can_frame *frame = (struct can_frame *)skb->data;
500
501 if (can_dropped_invalid_skb(dev, skb))
502 return NETDEV_TX_OK;
503
504 msg_obj_no = get_tx_next_msg_obj(priv);
505
506 /* prepare message object for transmission */
507 c_can_write_msg_object(dev, 0, frame, msg_obj_no);
508 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
509
510 /*
511 * we have to stop the queue in case of a wrap around or
512 * if the next TX message object is still in use
513 */
514 priv->tx_next++;
515 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
516 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
517 netif_stop_queue(dev);
518
519 return NETDEV_TX_OK;
520}
521
522static int c_can_set_bittiming(struct net_device *dev)
523{
524 unsigned int reg_btr, reg_brpe, ctrl_save;
525 u8 brp, brpe, sjw, tseg1, tseg2;
526 u32 ten_bit_brp;
527 struct c_can_priv *priv = netdev_priv(dev);
528 const struct can_bittiming *bt = &priv->can.bittiming;
529
 530	/* c_can provides 6-bit brp and 4-bit brpe fields */
531 ten_bit_brp = bt->brp - 1;
532 brp = ten_bit_brp & BTR_BRP_MASK;
533 brpe = ten_bit_brp >> 6;
534
535 sjw = bt->sjw - 1;
536 tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
537 tseg2 = bt->phase_seg2 - 1;
538 reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
539 (tseg2 << BTR_TSEG2_SHIFT);
540 reg_brpe = brpe & BRP_EXT_BRPE_MASK;
541
542 netdev_info(dev,
543 "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
544
545 ctrl_save = priv->read_reg(priv, &priv->regs->control);
546 priv->write_reg(priv, &priv->regs->control,
547 ctrl_save | CONTROL_CCE | CONTROL_INIT);
548 priv->write_reg(priv, &priv->regs->btr, reg_btr);
549 priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
550 priv->write_reg(priv, &priv->regs->control, ctrl_save);
551
552 return 0;
553}
554
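For a concrete view of the packing done in c_can_set_bittiming(), a stand-alone sketch that builds BTR and BRPE from one assumed set of bit-timing parameters (the values are made up for illustration; in the driver they come from the CAN core's bit-timing computation):

#include <stdio.h>

#define BTR_BRP_MASK		0x3f
#define BTR_SJW_SHIFT		6
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG2_SHIFT		12
#define BRP_EXT_BRPE_MASK	0x0f

int main(void)
{
	/* assumed example timing: brp=64, sjw=1, prop_seg+phase_seg1=13, phase_seg2=2 */
	unsigned int brp_total = 64, sjw = 1, tseg1 = 13, tseg2 = 2;
	unsigned int ten_bit_brp, brp, brpe, reg_btr, reg_brpe;

	/* the 10-bit prescaler is split into a 6-bit BRP field and a 4-bit BRPE field */
	ten_bit_brp = brp_total - 1;
	brp  = ten_bit_brp & BTR_BRP_MASK;
	brpe = ten_bit_brp >> 6;

	reg_btr = brp | ((sjw - 1) << BTR_SJW_SHIFT) |
		  ((tseg1 - 1) << BTR_TSEG1_SHIFT) |
		  ((tseg2 - 1) << BTR_TSEG2_SHIFT);
	reg_brpe = brpe & BRP_EXT_BRPE_MASK;

	printf("BTR=0x%04x BRPE=0x%04x\n", reg_btr, reg_brpe);
	return 0;
}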
555/*
556 * Configure C_CAN message objects for Tx and Rx purposes:
557 * C_CAN provides a total of 32 message objects that can be configured
558 * either for Tx or Rx purposes. Here the first 16 message objects are used as
559 * a reception FIFO. The end of reception FIFO is signified by the EoB bit
560 * being SET. The remaining 16 message objects are kept aside for Tx purposes.
561 * See user guide document for further details on configuring message
562 * objects.
563 */
564static void c_can_configure_msg_objects(struct net_device *dev)
565{
566 int i;
567
568 /* first invalidate all message objects */
569 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
570 c_can_inval_msg_object(dev, 0, i);
571
572 /* setup receive message objects */
573 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
574 c_can_setup_receive_object(dev, 0, i, 0, 0,
575 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
576
577 c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
578 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
579}
580
581/*
582 * Configure C_CAN chip:
583 * - enable/disable auto-retransmission
584 * - set operating mode
585 * - configure message objects
586 */
587static void c_can_chip_config(struct net_device *dev)
588{
589 struct c_can_priv *priv = netdev_priv(dev);
590
591 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
592 /* disable automatic retransmission */
593 priv->write_reg(priv, &priv->regs->control,
594 CONTROL_DISABLE_AR);
595 else
596 /* enable automatic retransmission */
597 priv->write_reg(priv, &priv->regs->control,
598 CONTROL_ENABLE_AR);
599
 600	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
 601			(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
602 /* loopback + silent mode : useful for hot self-test */
603 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
604 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
605 priv->write_reg(priv, &priv->regs->test,
606 TEST_LBACK | TEST_SILENT);
607 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
608 /* loopback mode : useful for self-test function */
609 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
610 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
611 priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
612 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
613 /* silent mode : bus-monitoring mode */
614 priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
615 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
616 priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
617 } else
618 /* normal mode*/
619 priv->write_reg(priv, &priv->regs->control,
620 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
621
622 /* configure message objects */
623 c_can_configure_msg_objects(dev);
624
625 /* set a `lec` value so that we can check for updates later */
626 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
627
628 /* set bittiming params */
629 c_can_set_bittiming(dev);
630}
631
632static void c_can_start(struct net_device *dev)
633{
634 struct c_can_priv *priv = netdev_priv(dev);
635
636 /* enable status change, error and module interrupts */
637 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
638
639 /* basic c_can configuration */
640 c_can_chip_config(dev);
641
642 priv->can.state = CAN_STATE_ERROR_ACTIVE;
643
644 /* reset tx helper pointers */
645 priv->tx_next = priv->tx_echo = 0;
646}
647
648static void c_can_stop(struct net_device *dev)
649{
650 struct c_can_priv *priv = netdev_priv(dev);
651
652 /* disable all interrupts */
653 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
654
655 /* set the state as STOPPED */
656 priv->can.state = CAN_STATE_STOPPED;
657}
658
659static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
660{
661 switch (mode) {
662 case CAN_MODE_START:
663 c_can_start(dev);
664 netif_wake_queue(dev);
665 break;
666 default:
667 return -EOPNOTSUPP;
668 }
669
670 return 0;
671}
672
673static int c_can_get_berr_counter(const struct net_device *dev,
674 struct can_berr_counter *bec)
675{
676 unsigned int reg_err_counter;
677 struct c_can_priv *priv = netdev_priv(dev);
678
679 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
680 bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
681 ERR_CNT_REC_SHIFT;
682 bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
683
684 return 0;
685}
686
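A stand-alone sketch of the unpacking done in c_can_get_berr_counter(), using the ERR_CNT_* masks from the top of this file and an arbitrary sample register value:

#include <stdio.h>

#define ERR_CNT_TEC_MASK	0xff
#define ERR_CNT_REC_SHIFT	8
#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
#define ERR_CNT_RP_SHIFT	15
#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)

int main(void)
{
	unsigned int reg_err_counter = 0x8a42;	/* arbitrary sample: RP set, REC=0x0a, TEC=0x42 */
	unsigned int txerr = reg_err_counter & ERR_CNT_TEC_MASK;
	unsigned int rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> ERR_CNT_REC_SHIFT;
	unsigned int rx_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> ERR_CNT_RP_SHIFT;

	printf("txerr=%u rxerr=%u receive-error-passive=%u\n", txerr, rxerr, rx_passive);
	return 0;
}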
687/*
688 * theory of operation:
689 *
690 * priv->tx_echo holds the number of the oldest can_frame put for
691 * transmission into the hardware, but not yet ACKed by the CAN tx
692 * complete IRQ.
693 *
694 * We iterate from priv->tx_echo to priv->tx_next and check if the
695 * packet has been transmitted, echo it back to the CAN framework.
696 * If we discover a not yet transmitted package, stop looking for more.
697 */
698static void c_can_do_tx(struct net_device *dev)
699{
700 u32 val;
701 u32 msg_obj_no;
702 struct c_can_priv *priv = netdev_priv(dev);
703 struct net_device_stats *stats = &dev->stats;
704
705 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
706 msg_obj_no = get_tx_echo_msg_obj(priv);
707 c_can_inval_msg_object(dev, 0, msg_obj_no);
708 val = c_can_read_reg32(priv, &priv->regs->txrqst1);
 709		if (!(val & (1 << (msg_obj_no - 1)))) {
710 can_get_echo_skb(dev,
711 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
712 stats->tx_bytes += priv->read_reg(priv,
713 &priv->regs->ifregs[0].msg_cntrl)
714 & IF_MCONT_DLC_MASK;
715 stats->tx_packets++;
716 }
717 }
718
 719	/* restart queue if wrap-around or if queue stalled on last pkt */
720 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
721 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
722 netif_wake_queue(dev);
723}
724
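The tx_next/tx_echo bookkeeping described above can be illustrated with a short stand-alone sketch: both counters only ever increase, and masking with C_CAN_NEXT_MSG_OBJ_MASK maps them onto the 16 TX message objects, wrapping from object 32 back to 17. The counter values are arbitrary:

#include <stdio.h>

#define C_CAN_MSG_OBJ_TX_NUM	16
#define C_CAN_MSG_OBJ_TX_FIRST	17
#define C_CAN_NEXT_MSG_OBJ_MASK	(C_CAN_MSG_OBJ_TX_NUM - 1)

/* same mapping as get_tx_next_msg_obj()/get_tx_echo_msg_obj() */
static unsigned int counter_to_obj(unsigned int counter)
{
	return (counter & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
}

int main(void)
{
	unsigned int tx_echo, tx_next = 20;	/* pretend 20 frames were queued so far */

	/* echo back frames 14..19: note the wrap from object 32 back to 17 */
	for (tx_echo = 14; tx_echo < tx_next; tx_echo++)
		printf("counter %2u -> tx message object %u\n",
		       tx_echo, counter_to_obj(tx_echo));
	return 0;
}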
725/*
726 * theory of operation:
727 *
 728 * The c_can core saves a received CAN message into the first free message
 729 * object it finds (starting with the lowest). Bits NEWDAT and
 730 * INTPND are set for this message object, indicating that a new message
 731 * has arrived. Since this alone does not guarantee in-order reception, we
 732 * keep two groups of message objects, partitioned at C_CAN_MSG_OBJ_RX_SPLIT.
733 *
734 * To ensure in-order frame reception we use the following
735 * approach while re-activating a message object to receive further
736 * frames:
737 * - if the current message object number is lower than
738 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
739 * the INTPND bit.
740 * - if the current message object number is equal to
741 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
742 * receive message objects.
743 * - if the current message object number is greater than
744 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
745 * only this message object.
746 */
747static int c_can_do_rx_poll(struct net_device *dev, int quota)
748{
749 u32 num_rx_pkts = 0;
750 unsigned int msg_obj, msg_ctrl_save;
751 struct c_can_priv *priv = netdev_priv(dev);
752 u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
753
754 for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
755 msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
756 val = c_can_read_reg32(priv, &priv->regs->intpnd1),
757 msg_obj++) {
758 /*
 759		 * Bit n-1 of the interrupt pending register corresponds to
 760		 * message object n, so adjust the bit index accordingly.
761 */
762 if (val & (1 << (msg_obj - 1))) {
763 c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
764 ~IF_COMM_TXRQST);
765 msg_ctrl_save = priv->read_reg(priv,
766 &priv->regs->ifregs[0].msg_cntrl);
767
768 if (msg_ctrl_save & IF_MCONT_EOB)
769 return num_rx_pkts;
770
771 if (msg_ctrl_save & IF_MCONT_MSGLST) {
772 c_can_handle_lost_msg_obj(dev, 0, msg_obj);
773 num_rx_pkts++;
774 quota--;
775 continue;
776 }
777
778 if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
779 continue;
780
781 /* read the data from the message object */
782 c_can_read_msg_object(dev, 0, msg_ctrl_save);
783
784 if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
785 c_can_mark_rx_msg_obj(dev, 0,
786 msg_ctrl_save, msg_obj);
787 else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
788 /* activate this msg obj */
789 c_can_activate_rx_msg_obj(dev, 0,
790 msg_ctrl_save, msg_obj);
791 else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
792 /* activate all lower message objects */
793 c_can_activate_all_lower_rx_msg_obj(dev,
794 0, msg_ctrl_save);
795
796 num_rx_pkts++;
797 quota--;
798 }
799 }
800
801 return num_rx_pkts;
802}
803
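The bit n-1 / object n mapping used throughout the RX path can be shown with a tiny stand-alone sketch that scans an arbitrary pending mask:

#include <stdio.h>

#define C_CAN_MSG_OBJ_RX_FIRST	1
#define C_CAN_MSG_OBJ_RX_LAST	16

int main(void)
{
	unsigned int pend = 0x00008005;	/* arbitrary: objects 1, 3 and 16 pending */
	int obj;

	for (obj = C_CAN_MSG_OBJ_RX_FIRST; obj <= C_CAN_MSG_OBJ_RX_LAST; obj++) {
		/* bit (obj - 1) corresponds to message object obj */
		if (pend & (1u << (obj - 1)))
			printf("message object %d has a pending frame\n", obj);
	}
	return 0;
}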
804static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
805{
806 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
807 (priv->current_status & LEC_UNUSED);
808}
809
810static int c_can_handle_state_change(struct net_device *dev,
811 enum c_can_bus_error_types error_type)
812{
813 unsigned int reg_err_counter;
814 unsigned int rx_err_passive;
815 struct c_can_priv *priv = netdev_priv(dev);
816 struct net_device_stats *stats = &dev->stats;
817 struct can_frame *cf;
818 struct sk_buff *skb;
819 struct can_berr_counter bec;
820
 821	/* propagate the error condition to the CAN stack */
822 skb = alloc_can_err_skb(dev, &cf);
823 if (unlikely(!skb))
824 return 0;
825
826 c_can_get_berr_counter(dev, &bec);
827 reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
828 rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
829 ERR_CNT_RP_SHIFT;
830
831 switch (error_type) {
832 case C_CAN_ERROR_WARNING:
833 /* error warning state */
834 priv->can.can_stats.error_warning++;
835 priv->can.state = CAN_STATE_ERROR_WARNING;
836 cf->can_id |= CAN_ERR_CRTL;
837 cf->data[1] = (bec.txerr > bec.rxerr) ?
838 CAN_ERR_CRTL_TX_WARNING :
839 CAN_ERR_CRTL_RX_WARNING;
840 cf->data[6] = bec.txerr;
841 cf->data[7] = bec.rxerr;
842
843 break;
844 case C_CAN_ERROR_PASSIVE:
845 /* error passive state */
846 priv->can.can_stats.error_passive++;
847 priv->can.state = CAN_STATE_ERROR_PASSIVE;
848 cf->can_id |= CAN_ERR_CRTL;
849 if (rx_err_passive)
850 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
851 if (bec.txerr > 127)
852 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
853
854 cf->data[6] = bec.txerr;
855 cf->data[7] = bec.rxerr;
856 break;
857 case C_CAN_BUS_OFF:
858 /* bus-off state */
859 priv->can.state = CAN_STATE_BUS_OFF;
860 cf->can_id |= CAN_ERR_BUSOFF;
861 /*
862 * disable all interrupts in bus-off mode to ensure that
863 * the CPU is not hogged down
864 */
865 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
866 can_bus_off(dev);
867 break;
868 default:
869 break;
870 }
871
872 netif_receive_skb(skb);
873 stats->rx_packets++;
874 stats->rx_bytes += cf->can_dlc;
875
876 return 1;
877}
878
879static int c_can_handle_bus_err(struct net_device *dev,
880 enum c_can_lec_type lec_type)
881{
882 struct c_can_priv *priv = netdev_priv(dev);
883 struct net_device_stats *stats = &dev->stats;
884 struct can_frame *cf;
885 struct sk_buff *skb;
886
887 /*
888 * early exit if no lec update or no error.
889 * no lec update means that no CAN bus event has been detected
890 * since CPU wrote 0x7 value to status reg.
891 */
892 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
893 return 0;
894
 895	/* propagate the error condition to the CAN stack */
896 skb = alloc_can_err_skb(dev, &cf);
897 if (unlikely(!skb))
898 return 0;
899
900 /*
901 * check for 'last error code' which tells us the
902 * type of the last error to occur on the CAN bus
903 */
904
 905	/* common for all types of bus errors */
906 priv->can.can_stats.bus_error++;
907 stats->rx_errors++;
908 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
909 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
910
911 switch (lec_type) {
912 case LEC_STUFF_ERROR:
913 netdev_dbg(dev, "stuff error\n");
914 cf->data[2] |= CAN_ERR_PROT_STUFF;
915 break;
916 case LEC_FORM_ERROR:
917 netdev_dbg(dev, "form error\n");
918 cf->data[2] |= CAN_ERR_PROT_FORM;
919 break;
920 case LEC_ACK_ERROR:
921 netdev_dbg(dev, "ack error\n");
922 cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
923 CAN_ERR_PROT_LOC_ACK_DEL);
924 break;
925 case LEC_BIT1_ERROR:
926 netdev_dbg(dev, "bit1 error\n");
927 cf->data[2] |= CAN_ERR_PROT_BIT1;
928 break;
929 case LEC_BIT0_ERROR:
930 netdev_dbg(dev, "bit0 error\n");
931 cf->data[2] |= CAN_ERR_PROT_BIT0;
932 break;
933 case LEC_CRC_ERROR:
934 netdev_dbg(dev, "CRC error\n");
935 cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
936 CAN_ERR_PROT_LOC_CRC_DEL);
937 break;
938 default:
939 break;
940 }
941
942 /* set a `lec` value so that we can check for updates later */
943 priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
944
945 netif_receive_skb(skb);
946 stats->rx_packets++;
947 stats->rx_bytes += cf->can_dlc;
948
949 return 1;
950}
951
952static int c_can_poll(struct napi_struct *napi, int quota)
953{
954 u16 irqstatus;
955 int lec_type = 0;
956 int work_done = 0;
957 struct net_device *dev = napi->dev;
958 struct c_can_priv *priv = netdev_priv(dev);
959
960 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
961 if (!irqstatus)
962 goto end;
963
964 /* status events have the highest priority */
965 if (irqstatus == STATUS_INTERRUPT) {
966 priv->current_status = priv->read_reg(priv,
967 &priv->regs->status);
968
969 /* handle Tx/Rx events */
970 if (priv->current_status & STATUS_TXOK)
971 priv->write_reg(priv, &priv->regs->status,
972 priv->current_status & ~STATUS_TXOK);
973
974 if (priv->current_status & STATUS_RXOK)
975 priv->write_reg(priv, &priv->regs->status,
976 priv->current_status & ~STATUS_RXOK);
977
978 /* handle state changes */
979 if ((priv->current_status & STATUS_EWARN) &&
980 (!(priv->last_status & STATUS_EWARN))) {
981 netdev_dbg(dev, "entered error warning state\n");
982 work_done += c_can_handle_state_change(dev,
983 C_CAN_ERROR_WARNING);
984 }
985 if ((priv->current_status & STATUS_EPASS) &&
986 (!(priv->last_status & STATUS_EPASS))) {
987 netdev_dbg(dev, "entered error passive state\n");
988 work_done += c_can_handle_state_change(dev,
989 C_CAN_ERROR_PASSIVE);
990 }
991 if ((priv->current_status & STATUS_BOFF) &&
992 (!(priv->last_status & STATUS_BOFF))) {
993 netdev_dbg(dev, "entered bus off state\n");
994 work_done += c_can_handle_state_change(dev,
995 C_CAN_BUS_OFF);
996 }
997
998 /* handle bus recovery events */
999 if ((!(priv->current_status & STATUS_BOFF)) &&
1000 (priv->last_status & STATUS_BOFF)) {
1001 netdev_dbg(dev, "left bus off state\n");
1002 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1003 }
1004 if ((!(priv->current_status & STATUS_EPASS)) &&
1005 (priv->last_status & STATUS_EPASS)) {
1006 netdev_dbg(dev, "left error passive state\n");
1007 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1008 }
1009
1010 priv->last_status = priv->current_status;
1011
1012 /* handle lec errors on the bus */
1013 lec_type = c_can_has_and_handle_berr(priv);
1014 if (lec_type)
1015 work_done += c_can_handle_bus_err(dev, lec_type);
1016 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1017 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1018 /* handle events corresponding to receive message objects */
1019 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1020 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1021 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1022 /* handle events corresponding to transmit message objects */
1023 c_can_do_tx(dev);
1024 }
1025
1026end:
1027 if (work_done < quota) {
1028 napi_complete(napi);
1029 /* enable all IRQs */
1030 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
1031 }
1032
1033 return work_done;
1034}
1035
1036static irqreturn_t c_can_isr(int irq, void *dev_id)
1037{
1038 u16 irqstatus;
1039 struct net_device *dev = (struct net_device *)dev_id;
1040 struct c_can_priv *priv = netdev_priv(dev);
1041
1042 irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
1043 if (!irqstatus)
1044 return IRQ_NONE;
1045
1046 /* disable all interrupts and schedule the NAPI */
1047 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1048 napi_schedule(&priv->napi);
1049
1050 return IRQ_HANDLED;
1051}
1052
1053static int c_can_open(struct net_device *dev)
1054{
1055 int err;
1056 struct c_can_priv *priv = netdev_priv(dev);
1057
1058 /* open the can device */
1059 err = open_candev(dev);
1060 if (err) {
1061 netdev_err(dev, "failed to open can device\n");
1062 return err;
1063 }
1064
1065 /* register interrupt handler */
1066 err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
1067 dev);
1068 if (err < 0) {
1069 netdev_err(dev, "failed to request interrupt\n");
1070 goto exit_irq_fail;
1071 }
1072
1073 /* start the c_can controller */
1074 c_can_start(dev);
1075
1076 napi_enable(&priv->napi);
1077 netif_start_queue(dev);
1078
1079 return 0;
1080
1081exit_irq_fail:
1082 close_candev(dev);
1083 return err;
1084}
1085
1086static int c_can_close(struct net_device *dev)
1087{
1088 struct c_can_priv *priv = netdev_priv(dev);
1089
1090 netif_stop_queue(dev);
1091 napi_disable(&priv->napi);
1092 c_can_stop(dev);
1093 free_irq(dev->irq, dev);
1094 close_candev(dev);
1095
1096 return 0;
1097}
1098
1099struct net_device *alloc_c_can_dev(void)
1100{
1101 struct net_device *dev;
1102 struct c_can_priv *priv;
1103
1104 dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
1105 if (!dev)
1106 return NULL;
1107
1108 priv = netdev_priv(dev);
1109 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1110
1111 priv->dev = dev;
1112 priv->can.bittiming_const = &c_can_bittiming_const;
1113 priv->can.do_set_mode = c_can_set_mode;
1114 priv->can.do_get_berr_counter = c_can_get_berr_counter;
1115 priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
1116 CAN_CTRLMODE_LOOPBACK |
1117 CAN_CTRLMODE_LISTENONLY |
1118 CAN_CTRLMODE_BERR_REPORTING;
1119
1120 return dev;
1121}
1122EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1123
1124void free_c_can_dev(struct net_device *dev)
1125{
1126 free_candev(dev);
1127}
1128EXPORT_SYMBOL_GPL(free_c_can_dev);
1129
1130static const struct net_device_ops c_can_netdev_ops = {
1131 .ndo_open = c_can_open,
1132 .ndo_stop = c_can_close,
1133 .ndo_start_xmit = c_can_start_xmit,
1134};
1135
1136int register_c_can_dev(struct net_device *dev)
1137{
1138 dev->flags |= IFF_ECHO; /* we support local echo */
1139 dev->netdev_ops = &c_can_netdev_ops;
1140
1141 return register_candev(dev);
1142}
1143EXPORT_SYMBOL_GPL(register_c_can_dev);
1144
1145void unregister_c_can_dev(struct net_device *dev)
1146{
1147 struct c_can_priv *priv = netdev_priv(dev);
1148
1149 /* disable all interrupts */
1150 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
1151
1152 unregister_candev(dev);
1153}
1154EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1155
1156MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
1157MODULE_LICENSE("GPL v2");
1158MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 00000000000..9b7fbef3d09
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
 12 * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#ifndef C_CAN_H
23#define C_CAN_H
24
25/* c_can IF registers */
26struct c_can_if_regs {
27 u16 com_req;
28 u16 com_mask;
29 u16 mask1;
30 u16 mask2;
31 u16 arb1;
32 u16 arb2;
33 u16 msg_cntrl;
34 u16 data[4];
35 u16 _reserved[13];
36};
37
38/* c_can hardware registers */
39struct c_can_regs {
40 u16 control;
41 u16 status;
42 u16 err_cnt;
43 u16 btr;
44 u16 interrupt;
45 u16 test;
46 u16 brp_ext;
47 u16 _reserved1;
48 struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
49 u16 _reserved2[8];
50 u16 txrqst1;
51 u16 txrqst2;
52 u16 _reserved3[6];
53 u16 newdat1;
54 u16 newdat2;
55 u16 _reserved4[6];
56 u16 intpnd1;
57 u16 intpnd2;
58 u16 _reserved5[6];
59 u16 msgval1;
60 u16 msgval2;
61 u16 _reserved6[6];
62};
63
64/* c_can private data structure */
65struct c_can_priv {
66 struct can_priv can; /* must be the first member */
67 struct napi_struct napi;
68 struct net_device *dev;
69 int tx_object;
70 int current_status;
71 int last_status;
72 u16 (*read_reg) (struct c_can_priv *priv, void *reg);
73 void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
74 struct c_can_regs __iomem *regs;
75 unsigned long irq_flags; /* for request_irq() */
76 unsigned int tx_next;
77 unsigned int tx_echo;
78 void *priv; /* for board-specific data */
79};
80
81struct net_device *alloc_c_can_dev(void);
82void free_c_can_dev(struct net_device *dev);
83int register_c_can_dev(struct net_device *dev);
84void unregister_c_can_dev(struct net_device *dev);
85
86#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 00000000000..e629b961ae2
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,215 @@
1/*
2 * Platform CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
 12 * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
13 * Bosch C_CAN user manual can be obtained from:
14 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
15 * users_manual_c_can.pdf
16 *
17 * This file is licensed under the terms of the GNU General Public
18 * License version 2. This program is licensed "as is" without any
19 * warranty of any kind, whether express or implied.
20 */
21
22#include <linux/kernel.h>
23#include <linux/version.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/delay.h>
32#include <linux/io.h>
33#include <linux/platform_device.h>
34#include <linux/clk.h>
35
36#include <linux/can/dev.h>
37
38#include "c_can.h"
39
40/*
41 * 16-bit c_can registers can be arranged differently in the memory
42 * architecture of different implementations. For example: 16-bit
 43 * registers can be aligned to a 16-bit or to a 32-bit boundary, etc.
 44 * Handle this by providing a common read/write interface.
45 */
46static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
47 void *reg)
48{
49 return readw(reg);
50}
51
52static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
53 void *reg, u16 val)
54{
55 writew(val, reg);
56}
57
58static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
59 void *reg)
60{
61 return readw(reg + (long)reg - (long)priv->regs);
62}
63
64static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
65 void *reg, u16 val)
66{
67 writew(val, reg + (long)reg - (long)priv->regs);
68}
69
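The offset-doubling trick used by the 32-bit-aligned accessors above can be shown with a short stand-alone sketch; the base address is fake and only the arithmetic matters:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* pretend register block base and a register at offset 0x06 (e.g. the BTR) */
	uintptr_t base = 0x1000;
	uintptr_t reg  = base + 0x06;

	/* same arithmetic as c_can_plat_read_reg_aligned_to_32bit(): reg + (reg - base) */
	uintptr_t addr_32bit_layout = reg + (reg - base);

	printf("16-bit-aligned layout: offset 0x%02lx\n", (unsigned long)(reg - base));
	printf("32-bit-aligned layout: offset 0x%02lx\n",
	       (unsigned long)(addr_32bit_layout - base));
	return 0;
}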
70static int __devinit c_can_plat_probe(struct platform_device *pdev)
71{
72 int ret;
73 void __iomem *addr;
74 struct net_device *dev;
75 struct c_can_priv *priv;
76 struct resource *mem, *irq;
77#ifdef CONFIG_HAVE_CLK
78 struct clk *clk;
79
80 /* get the appropriate clk */
81 clk = clk_get(&pdev->dev, NULL);
82 if (IS_ERR(clk)) {
83 dev_err(&pdev->dev, "no clock defined\n");
84 ret = -ENODEV;
85 goto exit;
86 }
87#endif
88
89 /* get the platform data */
90 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
91 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 92	if (!mem || !irq) {
93 ret = -ENODEV;
94 goto exit_free_clk;
95 }
96
97 if (!request_mem_region(mem->start, resource_size(mem),
98 KBUILD_MODNAME)) {
99 dev_err(&pdev->dev, "resource unavailable\n");
100 ret = -ENODEV;
101 goto exit_free_clk;
102 }
103
104 addr = ioremap(mem->start, resource_size(mem));
105 if (!addr) {
106 dev_err(&pdev->dev, "failed to map can port\n");
107 ret = -ENOMEM;
108 goto exit_release_mem;
109 }
110
111 /* allocate the c_can device */
112 dev = alloc_c_can_dev();
113 if (!dev) {
114 ret = -ENOMEM;
115 goto exit_iounmap;
116 }
117
118 priv = netdev_priv(dev);
119
120 dev->irq = irq->start;
121 priv->regs = addr;
122#ifdef CONFIG_HAVE_CLK
123 priv->can.clock.freq = clk_get_rate(clk);
124 priv->priv = clk;
125#endif
126
127 switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
128 case IORESOURCE_MEM_32BIT:
129 priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
130 priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
131 break;
132 case IORESOURCE_MEM_16BIT:
133 default:
134 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
135 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
136 break;
137 }
138
139 platform_set_drvdata(pdev, dev);
140 SET_NETDEV_DEV(dev, &pdev->dev);
141
142 ret = register_c_can_dev(dev);
143 if (ret) {
144 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
145 KBUILD_MODNAME, ret);
146 goto exit_free_device;
147 }
148
149 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
150 KBUILD_MODNAME, priv->regs, dev->irq);
151 return 0;
152
153exit_free_device:
154 platform_set_drvdata(pdev, NULL);
155 free_c_can_dev(dev);
156exit_iounmap:
157 iounmap(addr);
158exit_release_mem:
159 release_mem_region(mem->start, resource_size(mem));
160exit_free_clk:
161#ifdef CONFIG_HAVE_CLK
162 clk_put(clk);
163exit:
164#endif
165 dev_err(&pdev->dev, "probe failed\n");
166
167 return ret;
168}
169
170static int __devexit c_can_plat_remove(struct platform_device *pdev)
171{
172 struct net_device *dev = platform_get_drvdata(pdev);
173 struct c_can_priv *priv = netdev_priv(dev);
174 struct resource *mem;
175
176 unregister_c_can_dev(dev);
177 platform_set_drvdata(pdev, NULL);
178
179 free_c_can_dev(dev);
180 iounmap(priv->regs);
181
182 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
183 release_mem_region(mem->start, resource_size(mem));
184
185#ifdef CONFIG_HAVE_CLK
186 clk_put(priv->priv);
187#endif
188
189 return 0;
190}
191
192static struct platform_driver c_can_plat_driver = {
193 .driver = {
194 .name = KBUILD_MODNAME,
195 .owner = THIS_MODULE,
196 },
197 .probe = c_can_plat_probe,
198 .remove = __devexit_p(c_can_plat_remove),
199};
200
201static int __init c_can_plat_init(void)
202{
203 return platform_driver_register(&c_can_plat_driver);
204}
205module_init(c_can_plat_init);
206
207static void __exit c_can_plat_exit(void)
208{
209 platform_driver_unregister(&c_can_plat_driver);
210}
211module_exit(c_can_plat_exit);
212
213MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
214MODULE_LICENSE("GPL v2");
215MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 302be4aa69d..271a1f00c22 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
65static DEFINE_RWLOCK(cnic_dev_lock); 65static DEFINE_RWLOCK(cnic_dev_lock);
66static DEFINE_MUTEX(cnic_lock); 66static DEFINE_MUTEX(cnic_lock);
67 67
68static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; 68static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
69
70/* helper function, assuming cnic_lock is held */
71static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
72{
73 return rcu_dereference_protected(cnic_ulp_tbl[type],
74 lockdep_is_held(&cnic_lock));
75}
69 76
70static int cnic_service_bnx2(void *, void *); 77static int cnic_service_bnx2(void *, void *);
71static int cnic_service_bnx2x(void *, void *); 78static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
435 return -EINVAL; 442 return -EINVAL;
436 } 443 }
437 mutex_lock(&cnic_lock); 444 mutex_lock(&cnic_lock);
438 if (cnic_ulp_tbl[ulp_type]) { 445 if (cnic_ulp_tbl_prot(ulp_type)) {
439 pr_err("%s: Type %d has already been registered\n", 446 pr_err("%s: Type %d has already been registered\n",
440 __func__, ulp_type); 447 __func__, ulp_type);
441 mutex_unlock(&cnic_lock); 448 mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
478 return -EINVAL; 485 return -EINVAL;
479 } 486 }
480 mutex_lock(&cnic_lock); 487 mutex_lock(&cnic_lock);
481 ulp_ops = cnic_ulp_tbl[ulp_type]; 488 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
482 if (!ulp_ops) { 489 if (!ulp_ops) {
483 pr_err("%s: Type %d has not been registered\n", 490 pr_err("%s: Type %d has not been registered\n",
484 __func__, ulp_type); 491 __func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
529 return -EINVAL; 536 return -EINVAL;
530 } 537 }
531 mutex_lock(&cnic_lock); 538 mutex_lock(&cnic_lock);
532 if (cnic_ulp_tbl[ulp_type] == NULL) { 539 if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
533 pr_err("%s: Driver with type %d has not been registered\n", 540 pr_err("%s: Driver with type %d has not been registered\n",
534 __func__, ulp_type); 541 __func__, ulp_type);
535 mutex_unlock(&cnic_lock); 542 mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
544 551
545 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); 552 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
546 cp->ulp_handle[ulp_type] = ulp_ctx; 553 cp->ulp_handle[ulp_type] = ulp_ctx;
547 ulp_ops = cnic_ulp_tbl[ulp_type]; 554 ulp_ops = cnic_ulp_tbl_prot(ulp_type);
548 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); 555 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
549 cnic_hold(dev); 556 cnic_hold(dev);
550 557
@@ -2970,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
2970 struct cnic_ulp_ops *ulp_ops; 2977 struct cnic_ulp_ops *ulp_ops;
2971 2978
2972 mutex_lock(&cnic_lock); 2979 mutex_lock(&cnic_lock);
2973 ulp_ops = cp->ulp_ops[if_type]; 2980 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2981 lockdep_is_held(&cnic_lock));
2974 if (!ulp_ops) { 2982 if (!ulp_ops) {
2975 mutex_unlock(&cnic_lock); 2983 mutex_unlock(&cnic_lock);
2976 continue; 2984 continue;
@@ -2994,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
2994 struct cnic_ulp_ops *ulp_ops; 3002 struct cnic_ulp_ops *ulp_ops;
2995 3003
2996 mutex_lock(&cnic_lock); 3004 mutex_lock(&cnic_lock);
2997 ulp_ops = cp->ulp_ops[if_type]; 3005 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3006 lockdep_is_held(&cnic_lock));
2998 if (!ulp_ops || !ulp_ops->cnic_start) { 3007 if (!ulp_ops || !ulp_ops->cnic_start) {
2999 mutex_unlock(&cnic_lock); 3008 mutex_unlock(&cnic_lock);
3000 continue; 3009 continue;
@@ -3058,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
3058 struct cnic_ulp_ops *ulp_ops; 3067 struct cnic_ulp_ops *ulp_ops;
3059 3068
3060 mutex_lock(&cnic_lock); 3069 mutex_lock(&cnic_lock);
3061 ulp_ops = cnic_ulp_tbl[i]; 3070 ulp_ops = cnic_ulp_tbl_prot(i);
3062 if (!ulp_ops || !ulp_ops->cnic_init) { 3071 if (!ulp_ops || !ulp_ops->cnic_init) {
3063 mutex_unlock(&cnic_lock); 3072 mutex_unlock(&cnic_lock);
3064 continue; 3073 continue;
@@ -3082,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
3082 struct cnic_ulp_ops *ulp_ops; 3091 struct cnic_ulp_ops *ulp_ops;
3083 3092
3084 mutex_lock(&cnic_lock); 3093 mutex_lock(&cnic_lock);
3085 ulp_ops = cnic_ulp_tbl[i]; 3094 ulp_ops = cnic_ulp_tbl_prot(i);
3086 if (!ulp_ops || !ulp_ops->cnic_exit) { 3095 if (!ulp_ops || !ulp_ops->cnic_exit) {
3087 mutex_unlock(&cnic_lock); 3096 mutex_unlock(&cnic_lock);
3088 continue; 3097 continue;
@@ -3405,9 +3414,12 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3405 memset(&fl, 0, sizeof(fl)); 3414 memset(&fl, 0, sizeof(fl));
3406 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; 3415 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
3407 3416
3408 err = ip_route_output_key(&init_net, &rt, &fl); 3417 rt = ip_route_output_key(&init_net, &fl);
3409 if (!err) 3418 err = 0;
3419 if (!IS_ERR(rt))
3410 *dst = &rt->dst; 3420 *dst = &rt->dst;
3421 else
3422 err = PTR_ERR(rt);
3411 return err; 3423 return err;
3412#else 3424#else
3413 return -ENETUNREACH; 3425 return -ENETUNREACH;
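The routing hunk above adapts to ip_route_output_key() returning the struct rtable directly (or an ERR_PTR-encoded error) instead of filling a pointer and returning an int. A hedged sketch of the new calling convention, using the same 2.6.38-era struct flowi layout shown in the hunk; the wrapper name is illustrative only:

#include <linux/err.h>
#include <linux/string.h>
#include <net/net_namespace.h>
#include <net/route.h>

static int get_v4_dst(__be32 daddr, struct dst_entry **dst)
{
	struct rtable *rt;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.nl_u.ip4_u.daddr = daddr;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt))
		return PTR_ERR(rt);	/* e.g. -ENETUNREACH */

	*dst = &rt->dst;
	return 0;
}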
@@ -4187,6 +4199,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4187 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4199 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4188} 4200}
4189 4201
4202static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
4203{
4204 u32 max_conn;
4205
4206 max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
4207 dev->max_iscsi_conn = max_conn;
4208}
4209
4190static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4210static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4191{ 4211{
4192 struct cnic_local *cp = dev->cnic_priv; 4212 struct cnic_local *cp = dev->cnic_priv;
@@ -4511,6 +4531,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4511 return err; 4531 return err;
4512 } 4532 }
4513 4533
4534 cnic_get_bnx2_iscsi_info(dev);
4535
4514 return 0; 4536 return 0;
4515} 4537}
4516 4538
@@ -4722,129 +4744,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4722 cp->rx_cons = *cp->rx_cons_ptr; 4744 cp->rx_cons = *cp->rx_cons_ptr;
4723} 4745}
4724 4746
4725static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
4726 u32 lower_addr)
4727{
4728 u32 val;
4729 u8 mac[6];
4730
4731 val = CNIC_RD(dev, upper_addr);
4732
4733 mac[0] = (u8) (val >> 8);
4734 mac[1] = (u8) val;
4735
4736 val = CNIC_RD(dev, lower_addr);
4737
4738 mac[2] = (u8) (val >> 24);
4739 mac[3] = (u8) (val >> 16);
4740 mac[4] = (u8) (val >> 8);
4741 mac[5] = (u8) val;
4742
4743 if (is_valid_ether_addr(mac)) {
4744 memcpy(dev->mac_addr, mac, 6);
4745 return 0;
4746 } else {
4747 return -EINVAL;
4748 }
4749}
4750
4751static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4752{
4753 struct cnic_local *cp = dev->cnic_priv;
4754 u32 base, base2, addr, addr1, val;
4755 int port = CNIC_PORT(cp);
4756
4757 dev->max_iscsi_conn = 0;
4758 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4759 if (base == 0)
4760 return;
4761
4762 base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
4763 MISC_REG_GENERIC_CR_0));
4764 addr = BNX2X_SHMEM_ADDR(base,
4765 dev_info.port_hw_config[port].iscsi_mac_upper);
4766
4767 addr1 = BNX2X_SHMEM_ADDR(base,
4768 dev_info.port_hw_config[port].iscsi_mac_lower);
4769
4770 cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
4771
4772 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
4773 val = CNIC_RD(dev, addr);
4774
4775 if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
4776 u16 val16;
4777
4778 addr = BNX2X_SHMEM_ADDR(base,
4779 drv_lic_key[port].max_iscsi_init_conn);
4780 val16 = CNIC_RD16(dev, addr);
4781
4782 if (val16)
4783 val16 ^= 0x1e1e;
4784 dev->max_iscsi_conn = val16;
4785 }
4786
4787 if (BNX2X_CHIP_IS_E2(cp->chip_id))
4788 dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
4789
4790 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
4791 int func = CNIC_FUNC(cp);
4792 u32 mf_cfg_addr;
4793
4794 if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
4795 mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
4796 mf_cfg_addr));
4797 else
4798 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4799
4800 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4801 /* Must determine if the MF is SD vs SI mode */
4802 addr = BNX2X_SHMEM_ADDR(base,
4803 dev_info.shared_feature_config.config);
4804 val = CNIC_RD(dev, addr);
4805 if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
4806 SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
4807 int rc;
4808
4809 /* MULTI_FUNCTION_SI mode */
4810 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4811 func_ext_config[func].func_cfg);
4812 val = CNIC_RD(dev, addr);
4813 if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
4814 dev->max_iscsi_conn = 0;
4815
4816 if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
4817 dev->max_fcoe_conn = 0;
4818
4819 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4820 func_ext_config[func].
4821 iscsi_mac_addr_upper);
4822 addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4823 func_ext_config[func].
4824 iscsi_mac_addr_lower);
4825 rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
4826 addr1);
4827 if (rc && func > 1)
4828 dev->max_iscsi_conn = 0;
4829
4830 return;
4831 }
4832 }
4833
4834 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4835 func_mf_config[func].e1hov_tag);
4836
4837 val = CNIC_RD(dev, addr);
4838 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4839 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4840 dev->max_fcoe_conn = 0;
4841 dev->max_iscsi_conn = 0;
4842 }
4843 }
4844 if (!is_valid_ether_addr(dev->mac_addr))
4845 dev->max_iscsi_conn = 0;
4846}
4847
4848static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 4747static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4849{ 4748{
4850 struct cnic_local *cp = dev->cnic_priv; 4749 struct cnic_local *cp = dev->cnic_priv;
@@ -4926,8 +4825,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4926 4825
4927 cnic_init_bnx2x_kcq(dev); 4826 cnic_init_bnx2x_kcq(dev);
4928 4827
4929 cnic_get_bnx2x_iscsi_info(dev);
4930
4931 /* Only 1 EQ */ 4828 /* Only 1 EQ */
4932 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4829 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4933 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4830 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5281,15 +5178,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5281 5178
5282 dev_hold(dev); 5179 dev_hold(dev);
5283 pci_dev_get(pdev); 5180 pci_dev_get(pdev);
5284 if (pdev->device == PCI_DEVICE_ID_NX2_5709 || 5181 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5285 pdev->device == PCI_DEVICE_ID_NX2_5709S) { 5182 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5286 u8 rev; 5183 (pdev->revision < 0x10)) {
5287 5184 pci_dev_put(pdev);
5288 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 5185 goto cnic_err;
5289 if (rev < 0x10) {
5290 pci_dev_put(pdev);
5291 goto cnic_err;
5292 }
5293 } 5186 }
5294 pci_dev_put(pdev); 5187 pci_dev_put(pdev);
5295 5188
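The init_bnx2_cnic() hunk above drops the explicit pci_read_config_byte(PCI_REVISION_ID) read in favour of the revision the PCI core caches at enumeration time. A minimal illustration of that idiom, reusing the device-ID constants and threshold from the check above (the helper name is hypothetical):

#include <linux/pci.h>
#include <linux/types.h>

static bool bnx2_5709_too_old(struct pci_dev *pdev)
{
	/* pdev->revision is filled in by the PCI core; no config-space read */
	return (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
		pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	       pdev->revision < 0x10;
}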
@@ -5360,6 +5253,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5360 cdev->pcidev = pdev; 5253 cdev->pcidev = pdev;
5361 cp->chip_id = ethdev->chip_id; 5254 cp->chip_id = ethdev->chip_id;
5362 5255
5256 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5257 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5258 if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
5259 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5260 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5261
5262 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5263
5363 cp->cnic_ops = &cnic_bnx2x_ops; 5264 cp->cnic_ops = &cnic_bnx2x_ops;
5364 cp->start_hw = cnic_start_bnx2x_hw; 5265 cp->start_hw = cnic_start_bnx2x_hw;
5365 cp->stop_hw = cnic_stop_bnx2x_hw; 5266 cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c924c..4456260c653 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@ struct cnic_local {
220#define ULP_F_INIT 0 220#define ULP_F_INIT 0
221#define ULP_F_START 1 221#define ULP_F_START 1
222#define ULP_F_CALL_PENDING 2 222#define ULP_F_CALL_PENDING 2
223 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; 223 struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
224 224
225 unsigned long cnic_local_flags; 225 unsigned long cnic_local_flags;
226#define CNIC_LCL_FL_KWQ_INIT 0x0 226#define CNIC_LCL_FL_KWQ_INIT 0x0
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0ffe00..e01b49ee359 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.2.12" 15#define CNIC_MODULE_VERSION "2.2.13"
16#define CNIC_MODULE_RELDATE "Jan 03, 2011" 16#define CNIC_MODULE_RELDATE "Jan 31, 2011"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
159 u32 drv_state; 159 u32 drv_state;
160#define CNIC_DRV_STATE_REGD 0x00000001 160#define CNIC_DRV_STATE_REGD 0x00000001
161#define CNIC_DRV_STATE_USING_MSIX 0x00000002 161#define CNIC_DRV_STATE_USING_MSIX 0x00000002
162#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
163#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
164#define CNIC_DRV_STATE_NO_FCOE 0x00000010
162 u32 chip_id; 165 u32 chip_id;
163 u32 max_kwqe_pending; 166 u32 max_kwqe_pending;
164 struct pci_dev *pdev; 167 struct pci_dev *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
176 u32 fcoe_init_cid; 179 u32 fcoe_init_cid;
177 u16 iscsi_l2_client_id; 180 u16 iscsi_l2_client_id;
178 u16 iscsi_l2_cid; 181 u16 iscsi_l2_cid;
182 u8 iscsi_mac[ETH_ALEN];
179 183
180 int num_irq; 184 int num_irq;
181 struct cnic_irq irq_arr[MAX_CNIC_VEC]; 185 struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ef02aa68c92..862804f32b6 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
186 dev = NULL; 186 dev = NULL;
187 if (grp) 187 if (grp)
188 dev = vlan_group_get_device(grp, vlan); 188 dev = vlan_group_get_device(grp, vlan);
189 } else 189 } else if (netif_is_bond_slave(dev)) {
190 while (dev->master) 190 while (dev->master)
191 dev = dev->master; 191 dev = dev->master;
192 }
192 return dev; 193 return dev;
193 } 194 }
194 } 195 }
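The get_iff_from_mac() fix above only walks dev->master when the device is actually a bonding slave, leaving VLAN and other master/slave pairings alone. An illustrative helper (not from the driver) showing the guarded walk; it assumes the caller holds RTNL or RCU, as the offload path does:

#include <linux/netdevice.h>

static struct net_device *bond_uppermost_dev(struct net_device *dev)
{
	if (netif_is_bond_slave(dev))
		while (dev->master)	/* climb to the top-level bond */
			dev = dev->master;
	return dev;
}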
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
967 cxgb_neigh_update((struct neighbour *)ctx); 968 cxgb_neigh_update((struct neighbour *)ctx);
968 break; 969 break;
969 } 970 }
970 case (NETEVENT_PMTU_UPDATE):
971 break;
972 case (NETEVENT_REDIRECT):{ 971 case (NETEVENT_REDIRECT):{
973 struct netevent_redirect *nr = ctx; 972 struct netevent_redirect *nr = ctx;
974 cxgb_redirect(nr->old, nr->new); 973 cxgb_redirect(nr->old, nr->new);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index ec35d458102..5352c8a23f4 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
2471 case NETEVENT_NEIGH_UPDATE: 2471 case NETEVENT_NEIGH_UPDATE:
2472 check_neigh_update(data); 2472 check_neigh_update(data);
2473 break; 2473 break;
2474 case NETEVENT_PMTU_UPDATE:
2475 case NETEVENT_REDIRECT: 2474 case NETEVENT_REDIRECT:
2476 default: 2475 default:
2477 break; 2476 break;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 461dd6f905f..31770811360 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1593,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev)
1593 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 1593 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
1594 } 1594 }
1595 1595
1596 if (!is_valid_ether_addr(ndev->dev_addr)) 1596 if (!is_valid_ether_addr(ndev->dev_addr)) {
1597 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " 1597 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
1598 "set using ifconfig\n", ndev->name); 1598 "set using ifconfig\n", ndev->name);
1599 1599
1600 random_ether_addr(ndev->dev_addr);
1601 mac_src = "random";
1602 }
1603
1604
1600 platform_set_drvdata(pdev, ndev); 1605 platform_set_drvdata(pdev, ndev);
1601 ret = register_netdev(ndev); 1606 ret = register_netdev(ndev);
1602 1607
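With the dm9000 change above, an invalid EEPROM MAC no longer leaves the interface unusable; the probe path now falls back to a random locally administered address. A hedged sketch of that fallback, with a hypothetical helper name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void fixup_mac_addr(struct net_device *ndev)
{
	if (is_valid_ether_addr(ndev->dev_addr))
		return;

	/* sets the locally-administered bit, clears the multicast bit */
	random_ether_addr(ndev->dev_addr);
	dev_warn(&ndev->dev, "invalid EEPROM MAC, using a random address\n");
}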
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e136905..00bf595ebd6 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@ struct e1000_adapter {
364 /* structs defined in e1000_hw.h */ 364 /* structs defined in e1000_hw.h */
365 struct e1000_hw hw; 365 struct e1000_hw hw;
366 366
367 spinlock_t stats64_lock;
367 struct e1000_hw_stats stats; 368 struct e1000_hw_stats stats;
368 struct e1000_phy_info phy_info; 369 struct e1000_phy_info phy_info;
369 struct e1000_phy_stats phy_stats; 370 struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
494extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); 495extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
495extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 496extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
496extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 497extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
497extern void e1000e_update_stats(struct e1000_adapter *adapter); 498extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
499 struct rtnl_link_stats64
500 *stats);
498extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 501extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
499extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 502extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
500extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 503extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b6336cf..d4e51aa231b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@ struct e1000_stats {
46}; 46};
47 47
48#define E1000_STAT(str, m) { \ 48#define E1000_STAT(str, m) { \
49 .stat_string = str, \ 49 .stat_string = str, \
50 .type = E1000_STATS, \ 50 .type = E1000_STATS, \
51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ 51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
52 .stat_offset = offsetof(struct e1000_adapter, m) } 52 .stat_offset = offsetof(struct e1000_adapter, m) }
53#define E1000_NETDEV_STAT(str, m) { \ 53#define E1000_NETDEV_STAT(str, m) { \
54 .stat_string = str, \ 54 .stat_string = str, \
55 .type = NETDEV_STATS, \ 55 .type = NETDEV_STATS, \
56 .sizeof_stat = sizeof(((struct net_device *)0)->m), \ 56 .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
57 .stat_offset = offsetof(struct net_device, m) } 57 .stat_offset = offsetof(struct rtnl_link_stats64, m) }
58 58
59static const struct e1000_stats e1000_gstrings_stats[] = { 59static const struct e1000_stats e1000_gstrings_stats[] = {
60 E1000_STAT("rx_packets", stats.gprc), 60 E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
65 E1000_STAT("tx_broadcast", stats.bptc), 65 E1000_STAT("tx_broadcast", stats.bptc),
66 E1000_STAT("rx_multicast", stats.mprc), 66 E1000_STAT("rx_multicast", stats.mprc),
67 E1000_STAT("tx_multicast", stats.mptc), 67 E1000_STAT("tx_multicast", stats.mptc),
68 E1000_NETDEV_STAT("rx_errors", stats.rx_errors), 68 E1000_NETDEV_STAT("rx_errors", rx_errors),
69 E1000_NETDEV_STAT("tx_errors", stats.tx_errors), 69 E1000_NETDEV_STAT("tx_errors", tx_errors),
70 E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped), 70 E1000_NETDEV_STAT("tx_dropped", tx_dropped),
71 E1000_STAT("multicast", stats.mprc), 71 E1000_STAT("multicast", stats.mprc),
72 E1000_STAT("collisions", stats.colc), 72 E1000_STAT("collisions", stats.colc),
73 E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors), 73 E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
74 E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors), 74 E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
75 E1000_STAT("rx_crc_errors", stats.crcerrs), 75 E1000_STAT("rx_crc_errors", stats.crcerrs),
76 E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors), 76 E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
77 E1000_STAT("rx_no_buffer_count", stats.rnbc), 77 E1000_STAT("rx_no_buffer_count", stats.rnbc),
78 E1000_STAT("rx_missed_errors", stats.mpc), 78 E1000_STAT("rx_missed_errors", stats.mpc),
79 E1000_STAT("tx_aborted_errors", stats.ecol), 79 E1000_STAT("tx_aborted_errors", stats.ecol),
80 E1000_STAT("tx_carrier_errors", stats.tncrs), 80 E1000_STAT("tx_carrier_errors", stats.tncrs),
81 E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors), 81 E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
82 E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors), 82 E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
83 E1000_STAT("tx_window_errors", stats.latecol), 83 E1000_STAT("tx_window_errors", stats.latecol),
84 E1000_STAT("tx_abort_late_coll", stats.latecol), 84 E1000_STAT("tx_abort_late_coll", stats.latecol),
85 E1000_STAT("tx_deferred_ok", stats.dc), 85 E1000_STAT("tx_deferred_ok", stats.dc),
@@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev,
433 struct e1000_hw *hw = &adapter->hw; 433 struct e1000_hw *hw = &adapter->hw;
434 u32 *regs_buff = p; 434 u32 *regs_buff = p;
435 u16 phy_data; 435 u16 phy_data;
436 u8 revision_id;
437 436
438 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 437 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
439 438
440 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 439 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
441 440 adapter->pdev->device;
442 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
443 441
444 regs_buff[0] = er32(CTRL); 442 regs_buff[0] = er32(CTRL);
445 regs_buff[1] = er32(STATUS); 443 regs_buff[1] = er32(STATUS);
@@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
684 rx_old = adapter->rx_ring; 682 rx_old = adapter->rx_ring;
685 683
686 err = -ENOMEM; 684 err = -ENOMEM;
687 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 685 tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
688 if (!tx_ring) 686 if (!tx_ring)
689 goto err_alloc_tx; 687 goto err_alloc_tx;
690 /*
691 * use a memcpy to save any previously configured
692 * items like napi structs from having to be
693 * reinitialized
694 */
695 memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
696 688
697 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 689 rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
698 if (!rx_ring) 690 if (!rx_ring)
699 goto err_alloc_rx; 691 goto err_alloc_rx;
700 memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
701 692
702 adapter->tx_ring = tx_ring; 693 adapter->tx_ring = tx_ring;
703 adapter->rx_ring = rx_ring; 694 adapter->rx_ring = rx_ring;
@@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1255{ 1246{
1256 struct e1000_hw *hw = &adapter->hw; 1247 struct e1000_hw *hw = &adapter->hw;
1257 u32 ctrl_reg = 0; 1248 u32 ctrl_reg = 0;
1258 u32 stat_reg = 0;
1259 u16 phy_reg = 0; 1249 u16 phy_reg = 0;
1260 s32 ret_val = 0; 1250 s32 ret_val = 0;
1261 1251
@@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1363 * Set the ILOS bit on the fiber Nic if half duplex link is 1353 * Set the ILOS bit on the fiber Nic if half duplex link is
1364 * detected. 1354 * detected.
1365 */ 1355 */
1366 stat_reg = er32(STATUS); 1356 if ((er32(STATUS) & E1000_STATUS_FD) == 0)
1367 if ((stat_reg & E1000_STATUS_FD) == 0)
1368 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1357 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1369 } 1358 }
1370 1359
@@ -1972,8 +1961,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
1972static int e1000_nway_reset(struct net_device *netdev) 1961static int e1000_nway_reset(struct net_device *netdev)
1973{ 1962{
1974 struct e1000_adapter *adapter = netdev_priv(netdev); 1963 struct e1000_adapter *adapter = netdev_priv(netdev);
1975 if (netif_running(netdev)) 1964
1976 e1000e_reinit_locked(adapter); 1965 if (!netif_running(netdev))
1966 return -EAGAIN;
1967
1968 if (!adapter->hw.mac.autoneg)
1969 return -EINVAL;
1970
1971 e1000e_reinit_locked(adapter);
1972
1977 return 0; 1973 return 0;
1978} 1974}
1979 1975
@@ -1982,14 +1978,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1982 u64 *data) 1978 u64 *data)
1983{ 1979{
1984 struct e1000_adapter *adapter = netdev_priv(netdev); 1980 struct e1000_adapter *adapter = netdev_priv(netdev);
1981 struct rtnl_link_stats64 net_stats;
1985 int i; 1982 int i;
1986 char *p = NULL; 1983 char *p = NULL;
1987 1984
1988 e1000e_update_stats(adapter); 1985 e1000e_get_stats64(netdev, &net_stats);
1989 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1986 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1990 switch (e1000_gstrings_stats[i].type) { 1987 switch (e1000_gstrings_stats[i].type) {
1991 case NETDEV_STATS: 1988 case NETDEV_STATS:
1992 p = (char *) netdev + 1989 p = (char *) &net_stats +
1993 e1000_gstrings_stats[i].stat_offset; 1990 e1000_gstrings_stats[i].stat_offset;
1994 break; 1991 break;
1995 case E1000_STATS: 1992 case E1000_STATS:
@@ -2014,7 +2011,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
2014 2011
2015 switch (stringset) { 2012 switch (stringset) {
2016 case ETH_SS_TEST: 2013 case ETH_SS_TEST:
2017 memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test)); 2014 memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
2018 break; 2015 break;
2019 case ETH_SS_STATS: 2016 case ETH_SS_STATS:
2020 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 2017 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974cfec..232b42b7f7c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -2104,7 +2104,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2104{ 2104{
2105 union ich8_hws_flash_status hsfsts; 2105 union ich8_hws_flash_status hsfsts;
2106 s32 ret_val = -E1000_ERR_NVM; 2106 s32 ret_val = -E1000_ERR_NVM;
2107 s32 i = 0;
2108 2107
2109 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2108 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2110 2109
@@ -2140,6 +2139,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2140 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2139 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2141 ret_val = 0; 2140 ret_val = 0;
2142 } else { 2141 } else {
2142 s32 i = 0;
2143
2143 /* 2144 /*
2144 * Otherwise poll for sometime so the current 2145 * Otherwise poll for sometime so the current
2145 * cycle has a chance to end before giving up. 2146 * cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa1749bf6..96921de5df2 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1978{ 1978{
1979 struct e1000_nvm_info *nvm = &hw->nvm; 1979 struct e1000_nvm_info *nvm = &hw->nvm;
1980 u32 eecd = er32(EECD); 1980 u32 eecd = er32(EECD);
1981 u16 timeout = 0;
1982 u8 spi_stat_reg; 1981 u8 spi_stat_reg;
1983 1982
1984 if (nvm->type == e1000_nvm_eeprom_spi) { 1983 if (nvm->type == e1000_nvm_eeprom_spi) {
1984 u16 timeout = NVM_MAX_RETRY_SPI;
1985
1985 /* Clear SK and CS */ 1986 /* Clear SK and CS */
1986 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 1987 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1987 ew32(EECD, eecd); 1988 ew32(EECD, eecd);
1988 udelay(1); 1989 udelay(1);
1989 timeout = NVM_MAX_RETRY_SPI;
1990 1990
1991 /* 1991 /*
1992 * Read "Status Register" repeatedly until the LSB is cleared. 1992 * Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2e5022849f1..455d5a1101e 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -900,8 +900,6 @@ next_desc:
900 900
901 adapter->total_rx_bytes += total_rx_bytes; 901 adapter->total_rx_bytes += total_rx_bytes;
902 adapter->total_rx_packets += total_rx_packets; 902 adapter->total_rx_packets += total_rx_packets;
903 netdev->stats.rx_bytes += total_rx_bytes;
904 netdev->stats.rx_packets += total_rx_packets;
905 return cleaned; 903 return cleaned;
906} 904}
907 905
@@ -1060,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1060 } 1058 }
1061 adapter->total_tx_bytes += total_tx_bytes; 1059 adapter->total_tx_bytes += total_tx_bytes;
1062 adapter->total_tx_packets += total_tx_packets; 1060 adapter->total_tx_packets += total_tx_packets;
1063 netdev->stats.tx_bytes += total_tx_bytes;
1064 netdev->stats.tx_packets += total_tx_packets;
1065 return count < tx_ring->count; 1061 return count < tx_ring->count;
1066} 1062}
1067 1063
@@ -1248,8 +1244,6 @@ next_desc:
1248 1244
1249 adapter->total_rx_bytes += total_rx_bytes; 1245 adapter->total_rx_bytes += total_rx_bytes;
1250 adapter->total_rx_packets += total_rx_packets; 1246 adapter->total_rx_packets += total_rx_packets;
1251 netdev->stats.rx_bytes += total_rx_bytes;
1252 netdev->stats.rx_packets += total_rx_packets;
1253 return cleaned; 1247 return cleaned;
1254} 1248}
1255 1249
@@ -1429,8 +1423,6 @@ next_desc:
1429 1423
1430 adapter->total_rx_bytes += total_rx_bytes; 1424 adapter->total_rx_bytes += total_rx_bytes;
1431 adapter->total_rx_packets += total_rx_packets; 1425 adapter->total_rx_packets += total_rx_packets;
1432 netdev->stats.rx_bytes += total_rx_bytes;
1433 netdev->stats.rx_packets += total_rx_packets;
1434 return cleaned; 1426 return cleaned;
1435} 1427}
1436 1428
@@ -1857,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1857 int err = 0, vector = 0; 1849 int err = 0, vector = 0;
1858 1850
1859 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1851 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1860 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); 1852 snprintf(adapter->rx_ring->name,
1853 sizeof(adapter->rx_ring->name) - 1,
1854 "%s-rx-0", netdev->name);
1861 else 1855 else
1862 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1856 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1863 err = request_irq(adapter->msix_entries[vector].vector, 1857 err = request_irq(adapter->msix_entries[vector].vector,
@@ -1870,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1870 vector++; 1864 vector++;
1871 1865
1872 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1866 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1873 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); 1867 snprintf(adapter->tx_ring->name,
1868 sizeof(adapter->tx_ring->name) - 1,
1869 "%s-tx-0", netdev->name);
1874 else 1870 else
1875 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1871 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1876 err = request_irq(adapter->msix_entries[vector].vector, 1872 err = request_irq(adapter->msix_entries[vector].vector,
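Both MSI-X naming hunks above switch from sprintf() to snprintf(), so the vector name can never overrun the ring's name buffer regardless of the earlier strlen() check. A short sketch of the bounded form (buffer handling is illustrative):

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void format_irq_name(char *buf, size_t len, struct net_device *netdev)
{
	/* snprintf() always NUL-terminates within len; the driver passes
	 * sizeof(name) - 1, which is simply one byte more conservative */
	snprintf(buf, len, "%s-rx-0", netdev->name);
}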
@@ -2734,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2734{ 2730{
2735 struct e1000_hw *hw = &adapter->hw; 2731 struct e1000_hw *hw = &adapter->hw;
2736 u32 rctl, rfctl; 2732 u32 rctl, rfctl;
2737 u32 psrctl = 0;
2738 u32 pages = 0; 2733 u32 pages = 0;
2739 2734
2740 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2735 /* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2833,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2833 adapter->rx_ps_pages = 0; 2828 adapter->rx_ps_pages = 0;
2834 2829
2835 if (adapter->rx_ps_pages) { 2830 if (adapter->rx_ps_pages) {
2831 u32 psrctl = 0;
2832
2836 /* Configure extra packet-split registers */ 2833 /* Configure extra packet-split registers */
2837 rfctl = er32(RFCTL); 2834 rfctl = er32(RFCTL);
2838 rfctl |= E1000_RFCTL_EXTEN; 2835 rfctl |= E1000_RFCTL_EXTEN;
@@ -3034,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
3034 struct netdev_hw_addr *ha; 3031 struct netdev_hw_addr *ha;
3035 u8 *mta_list; 3032 u8 *mta_list;
3036 u32 rctl; 3033 u32 rctl;
3037 int i;
3038 3034
3039 /* Check for Promiscuous and All Multicast modes */ 3035 /* Check for Promiscuous and All Multicast modes */
3040 3036
@@ -3057,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
3057 ew32(RCTL, rctl); 3053 ew32(RCTL, rctl);
3058 3054
3059 if (!netdev_mc_empty(netdev)) { 3055 if (!netdev_mc_empty(netdev)) {
3056 int i = 0;
3057
3060 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); 3058 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
3061 if (!mta_list) 3059 if (!mta_list)
3062 return; 3060 return;
3063 3061
3064 /* prepare a packed array of only addresses. */ 3062 /* prepare a packed array of only addresses. */
3065 i = 0;
3066 netdev_for_each_mc_addr(ha, netdev) 3063 netdev_for_each_mc_addr(ha, netdev)
3067 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3064 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3068 3065
@@ -3359,6 +3356,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3359 e1e_flush(); 3356 e1e_flush();
3360} 3357}
3361 3358
3359static void e1000e_update_stats(struct e1000_adapter *adapter);
3360
3362void e1000e_down(struct e1000_adapter *adapter) 3361void e1000e_down(struct e1000_adapter *adapter)
3363{ 3362{
3364 struct net_device *netdev = adapter->netdev; 3363 struct net_device *netdev = adapter->netdev;
@@ -3393,6 +3392,11 @@ void e1000e_down(struct e1000_adapter *adapter)
3393 del_timer_sync(&adapter->phy_info_timer); 3392 del_timer_sync(&adapter->phy_info_timer);
3394 3393
3395 netif_carrier_off(netdev); 3394 netif_carrier_off(netdev);
3395
3396 spin_lock(&adapter->stats64_lock);
3397 e1000e_update_stats(adapter);
3398 spin_unlock(&adapter->stats64_lock);
3399
3396 adapter->link_speed = 0; 3400 adapter->link_speed = 0;
3397 adapter->link_duplex = 0; 3401 adapter->link_duplex = 0;
3398 3402
@@ -3437,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
3437 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3441 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3438 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3442 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3439 3443
3444 spin_lock_init(&adapter->stats64_lock);
3445
3440 e1000e_set_interrupt_capability(adapter); 3446 e1000e_set_interrupt_capability(adapter);
3441 3447
3442 if (e1000_alloc_queues(adapter)) 3448 if (e1000_alloc_queues(adapter))
@@ -3918,7 +3924,7 @@ release:
3918 * e1000e_update_stats - Update the board statistics counters 3924 * e1000e_update_stats - Update the board statistics counters
3919 * @adapter: board private structure 3925 * @adapter: board private structure
3920 **/ 3926 **/
3921void e1000e_update_stats(struct e1000_adapter *adapter) 3927static void e1000e_update_stats(struct e1000_adapter *adapter)
3922{ 3928{
3923 struct net_device *netdev = adapter->netdev; 3929 struct net_device *netdev = adapter->netdev;
3924 struct e1000_hw *hw = &adapter->hw; 3930 struct e1000_hw *hw = &adapter->hw;
@@ -4030,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4030{ 4036{
4031 struct e1000_hw *hw = &adapter->hw; 4037 struct e1000_hw *hw = &adapter->hw;
4032 struct e1000_phy_regs *phy = &adapter->phy_regs; 4038 struct e1000_phy_regs *phy = &adapter->phy_regs;
4033 int ret_val;
4034 4039
4035 if ((er32(STATUS) & E1000_STATUS_LU) && 4040 if ((er32(STATUS) & E1000_STATUS_LU) &&
4036 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4041 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4042 int ret_val;
4043
4037 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); 4044 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4038 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); 4045 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4039 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); 4046 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4179,7 +4186,6 @@ static void e1000_watchdog_task(struct work_struct *work)
4179 struct e1000_ring *tx_ring = adapter->tx_ring; 4186 struct e1000_ring *tx_ring = adapter->tx_ring;
4180 struct e1000_hw *hw = &adapter->hw; 4187 struct e1000_hw *hw = &adapter->hw;
4181 u32 link, tctl; 4188 u32 link, tctl;
4182 int tx_pending = 0;
4183 4189
4184 if (test_bit(__E1000_DOWN, &adapter->state)) 4190 if (test_bit(__E1000_DOWN, &adapter->state))
4185 return; 4191 return;
@@ -4320,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4320 } 4326 }
4321 4327
4322link_up: 4328link_up:
4329 spin_lock(&adapter->stats64_lock);
4323 e1000e_update_stats(adapter); 4330 e1000e_update_stats(adapter);
4331 spin_unlock(&adapter->stats64_lock);
4324 4332
4325 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 4333 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4326 adapter->tpt_old = adapter->stats.tpt; 4334 adapter->tpt_old = adapter->stats.tpt;
@@ -4334,20 +4342,17 @@ link_up:
4334 4342
4335 e1000e_update_adaptive(&adapter->hw); 4343 e1000e_update_adaptive(&adapter->hw);
4336 4344
4337 if (!netif_carrier_ok(netdev)) { 4345 if (!netif_carrier_ok(netdev) &&
4338 tx_pending = (e1000_desc_unused(tx_ring) + 1 < 4346 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4339 tx_ring->count); 4347 /*
4340 if (tx_pending) { 4348 * We've lost link, so the controller stops DMA,
4341 /* 4349 * but we've got queued Tx work that's never going
4342 * We've lost link, so the controller stops DMA, 4350 * to get done, so reset controller to flush Tx.
4343 * but we've got queued Tx work that's never going 4351 * (Do the reset outside of interrupt context).
4344 * to get done, so reset controller to flush Tx. 4352 */
4345 * (Do the reset outside of interrupt context). 4353 schedule_work(&adapter->reset_task);
4346 */ 4354 /* return immediately since reset is imminent */
4347 schedule_work(&adapter->reset_task); 4355 return;
4348 /* return immediately since reset is imminent */
4349 return;
4350 }
4351 } 4356 }
4352 4357
4353 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4358 /* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4411,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
4411 u32 cmd_length = 0; 4416 u32 cmd_length = 0;
4412 u16 ipcse = 0, tucse, mss; 4417 u16 ipcse = 0, tucse, mss;
4413 u8 ipcss, ipcso, tucss, tucso, hdr_len; 4418 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4414 int err;
4415 4419
4416 if (!skb_is_gso(skb)) 4420 if (!skb_is_gso(skb))
4417 return 0; 4421 return 0;
4418 4422
4419 if (skb_header_cloned(skb)) { 4423 if (skb_header_cloned(skb)) {
4420 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4424 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4425
4421 if (err) 4426 if (err)
4422 return err; 4427 return err;
4423 } 4428 }
@@ -4928,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
4928} 4933}
4929 4934
4930/** 4935/**
4931 * e1000_get_stats - Get System Network Statistics 4936 * e1000_get_stats64 - Get System Network Statistics
4932 * @netdev: network interface device structure 4937 * @netdev: network interface device structure
4938 * @stats: rtnl_link_stats64 pointer
4933 * 4939 *
4934 * Returns the address of the device statistics structure. 4940 * Returns the address of the device statistics structure.
4935 * The statistics are actually updated from the timer callback.
4936 **/ 4941 **/
4937static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 4942struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
4943 struct rtnl_link_stats64 *stats)
4938{ 4944{
4939 /* only return the current stats */ 4945 struct e1000_adapter *adapter = netdev_priv(netdev);
4940 return &netdev->stats; 4946
4947 memset(stats, 0, sizeof(struct rtnl_link_stats64));
4948 spin_lock(&adapter->stats64_lock);
4949 e1000e_update_stats(adapter);
4950 /* Fill out the OS statistics structure */
4951 stats->rx_bytes = adapter->stats.gorc;
4952 stats->rx_packets = adapter->stats.gprc;
4953 stats->tx_bytes = adapter->stats.gotc;
4954 stats->tx_packets = adapter->stats.gptc;
4955 stats->multicast = adapter->stats.mprc;
4956 stats->collisions = adapter->stats.colc;
4957
4958 /* Rx Errors */
4959
4960 /*
4961 * RLEC on some newer hardware can be incorrect so build
4962 * our own version based on RUC and ROC
4963 */
4964 stats->rx_errors = adapter->stats.rxerrc +
4965 adapter->stats.crcerrs + adapter->stats.algnerrc +
4966 adapter->stats.ruc + adapter->stats.roc +
4967 adapter->stats.cexterr;
4968 stats->rx_length_errors = adapter->stats.ruc +
4969 adapter->stats.roc;
4970 stats->rx_crc_errors = adapter->stats.crcerrs;
4971 stats->rx_frame_errors = adapter->stats.algnerrc;
4972 stats->rx_missed_errors = adapter->stats.mpc;
4973
4974 /* Tx Errors */
4975 stats->tx_errors = adapter->stats.ecol +
4976 adapter->stats.latecol;
4977 stats->tx_aborted_errors = adapter->stats.ecol;
4978 stats->tx_window_errors = adapter->stats.latecol;
4979 stats->tx_carrier_errors = adapter->stats.tncrs;
4980
4981 /* Tx Dropped needs to be maintained elsewhere */
4982
4983 spin_unlock(&adapter->stats64_lock);
4984 return stats;
4941} 4985}
4942 4986
4943/** 4987/**
@@ -5507,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
5507{ 5551{
5508 struct net_device *netdev = data; 5552 struct net_device *netdev = data;
5509 struct e1000_adapter *adapter = netdev_priv(netdev); 5553 struct e1000_adapter *adapter = netdev_priv(netdev);
5510 int vector, msix_irq;
5511 5554
5512 if (adapter->msix_entries) { 5555 if (adapter->msix_entries) {
5556 int vector, msix_irq;
5557
5513 vector = 0; 5558 vector = 0;
5514 msix_irq = adapter->msix_entries[vector].vector; 5559 msix_irq = adapter->msix_entries[vector].vector;
5515 disable_irq(msix_irq); 5560 disable_irq(msix_irq);
@@ -5706,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
5706 .ndo_open = e1000_open, 5751 .ndo_open = e1000_open,
5707 .ndo_stop = e1000_close, 5752 .ndo_stop = e1000_close,
5708 .ndo_start_xmit = e1000_xmit_frame, 5753 .ndo_start_xmit = e1000_xmit_frame,
5709 .ndo_get_stats = e1000_get_stats, 5754 .ndo_get_stats64 = e1000e_get_stats64,
5710 .ndo_set_multicast_list = e1000_set_multi, 5755 .ndo_set_multicast_list = e1000_set_multi,
5711 .ndo_set_mac_address = e1000_set_mac, 5756 .ndo_set_mac_address = e1000_set_mac,
5712 .ndo_change_mtu = e1000_change_mtu, 5757 .ndo_change_mtu = e1000_change_mtu,
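The netdev.c changes above drop the per-IRQ stats bumping and the old ndo_get_stats in favour of an ndo_get_stats64 implementation that refreshes the hardware counters under stats64_lock and fills the caller-supplied structure. A hedged sketch of the shape of such a handler, with hypothetical adapter fields and helper names:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_adapter {
	spinlock_t stats_lock;
	struct { u64 rx_ok, tx_ok; } hw_stats;
};

static void foo_update_hw_stats(struct foo_adapter *adapter)
{
	/* stub: would read and accumulate the MAC counters here */
}

static struct rtnl_link_stats64 *foo_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats_lock);
	foo_update_hw_stats(adapter);	/* refresh the cached hardware counters */
	stats->rx_packets = adapter->hw_stats.rx_ok;
	stats->tx_packets = adapter->hw_stats.tx_ok;
	spin_unlock(&adapter->stats_lock);

	return stats;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats64 = foo_get_stats64,
};

The 64-bit interface avoids the 32-bit wrap of the old net_device_stats counters and can be called by the core at any time, which is why the driver now serializes e1000e_update_stats() with a spinlock.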
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051b134..6ae31fcfb62 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) 2409s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2410{ 2410{
2411 s32 ret_val; 2411 s32 ret_val;
2412 u32 page_select = 0;
2413 u32 page = offset >> IGP_PAGE_SHIFT; 2412 u32 page = offset >> IGP_PAGE_SHIFT;
2414 u32 page_shift = 0;
2415 2413
2416 ret_val = hw->phy.ops.acquire(hw); 2414 ret_val = hw->phy.ops.acquire(hw);
2417 if (ret_val) 2415 if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2427 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2425 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2428 2426
2429 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2427 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2428 u32 page_shift, page_select;
2429
2430 /* 2430 /*
2431 * Page select is register 31 for phy address 1 and 22 for 2431 * Page select is register 31 for phy address 1 and 22 for
2432 * phy address 2 and 3. Page select is shifted only for 2432 * phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) 2468s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2469{ 2469{
2470 s32 ret_val; 2470 s32 ret_val;
2471 u32 page_select = 0;
2472 u32 page = offset >> IGP_PAGE_SHIFT; 2471 u32 page = offset >> IGP_PAGE_SHIFT;
2473 u32 page_shift = 0;
2474 2472
2475 ret_val = hw->phy.ops.acquire(hw); 2473 ret_val = hw->phy.ops.acquire(hw);
2476 if (ret_val) 2474 if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2486 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); 2484 hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
2487 2485
2488 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2486 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2487 u32 page_shift, page_select;
2488
2489 /* 2489 /*
2490 * Page select is register 31 for phy address 1 and 22 for 2490 * Page select is register 31 for phy address 1 and 22 for
2491 * phy address 2 and 3. Page select is shifted only for 2491 * phy address 2 and 3. Page select is shifted only for
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31880b..2e573be16c1 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o 4 enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
5 5
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49d9db..e816bbb9fbf 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.10" 35#define DRV_VERSION "2.1.1.10"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
39 39
40#define ENIC_WQ_MAX 8 40#define ENIC_WQ_MAX 1
41#define ENIC_RQ_MAX 8 41#define ENIC_RQ_MAX 1
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
44 44
@@ -49,7 +49,7 @@ struct enic_msix_entry {
49 void *devid; 49 void *devid;
50}; 50};
51 51
52#define ENIC_SET_APPLIED (1 << 0) 52#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
53#define ENIC_SET_REQUEST (1 << 1) 53#define ENIC_SET_REQUEST (1 << 1)
54#define ENIC_SET_NAME (1 << 2) 54#define ENIC_SET_NAME (1 << 2)
55#define ENIC_SET_INSTANCE (1 << 3) 55#define ENIC_SET_INSTANCE (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
101 /* receive queue cache line section */ 101 /* receive queue cache line section */
102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; 102 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
103 unsigned int rq_count; 103 unsigned int rq_count;
104 int (*rq_alloc_buf)(struct vnic_rq *rq);
105 u64 rq_truncated_pkts; 104 u64 rq_truncated_pkts;
106 u64 rq_bad_fcs; 105 u64 rq_bad_fcs;
107 struct napi_struct napi[ENIC_RQ_MAX]; 106 struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 00000000000..37ad3a1c82e
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/pci.h>
20#include <linux/etherdevice.h>
21
22#include "vnic_dev.h"
23#include "vnic_vic.h"
24#include "enic_res.h"
25#include "enic.h"
26#include "enic_dev.h"
27
28int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
29{
30 int err;
31
32 spin_lock(&enic->devcmd_lock);
33 err = vnic_dev_fw_info(enic->vdev, fw_info);
34 spin_unlock(&enic->devcmd_lock);
35
36 return err;
37}
38
39int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
40{
41 int err;
42
43 spin_lock(&enic->devcmd_lock);
44 err = vnic_dev_stats_dump(enic->vdev, vstats);
45 spin_unlock(&enic->devcmd_lock);
46
47 return err;
48}
49
50int enic_dev_add_station_addr(struct enic *enic)
51{
52 int err;
53
54 if (!is_valid_ether_addr(enic->netdev->dev_addr))
55 return -EADDRNOTAVAIL;
56
57 spin_lock(&enic->devcmd_lock);
58 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
59 spin_unlock(&enic->devcmd_lock);
60
61 return err;
62}
63
64int enic_dev_del_station_addr(struct enic *enic)
65{
66 int err;
67
68 if (!is_valid_ether_addr(enic->netdev->dev_addr))
69 return -EADDRNOTAVAIL;
70
71 spin_lock(&enic->devcmd_lock);
72 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
73 spin_unlock(&enic->devcmd_lock);
74
75 return err;
76}
77
78int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
79 int broadcast, int promisc, int allmulti)
80{
81 int err;
82
83 spin_lock(&enic->devcmd_lock);
84 err = vnic_dev_packet_filter(enic->vdev, directed,
85 multicast, broadcast, promisc, allmulti);
86 spin_unlock(&enic->devcmd_lock);
87
88 return err;
89}
90
91int enic_dev_add_addr(struct enic *enic, u8 *addr)
92{
93 int err;
94
95 spin_lock(&enic->devcmd_lock);
96 err = vnic_dev_add_addr(enic->vdev, addr);
97 spin_unlock(&enic->devcmd_lock);
98
99 return err;
100}
101
102int enic_dev_del_addr(struct enic *enic, u8 *addr)
103{
104 int err;
105
106 spin_lock(&enic->devcmd_lock);
107 err = vnic_dev_del_addr(enic->vdev, addr);
108 spin_unlock(&enic->devcmd_lock);
109
110 return err;
111}
112
113int enic_dev_notify_unset(struct enic *enic)
114{
115 int err;
116
117 spin_lock(&enic->devcmd_lock);
118 err = vnic_dev_notify_unset(enic->vdev);
119 spin_unlock(&enic->devcmd_lock);
120
121 return err;
122}
123
124int enic_dev_hang_notify(struct enic *enic)
125{
126 int err;
127
128 spin_lock(&enic->devcmd_lock);
129 err = vnic_dev_hang_notify(enic->vdev);
130 spin_unlock(&enic->devcmd_lock);
131
132 return err;
133}
134
135int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
136{
137 int err;
138
139 spin_lock(&enic->devcmd_lock);
140 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
141 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
142 spin_unlock(&enic->devcmd_lock);
143
144 return err;
145}
146
147int enic_dev_enable(struct enic *enic)
148{
149 int err;
150
151 spin_lock(&enic->devcmd_lock);
152 err = vnic_dev_enable_wait(enic->vdev);
153 spin_unlock(&enic->devcmd_lock);
154
155 return err;
156}
157
158int enic_dev_disable(struct enic *enic)
159{
160 int err;
161
162 spin_lock(&enic->devcmd_lock);
163 err = vnic_dev_disable(enic->vdev);
164 spin_unlock(&enic->devcmd_lock);
165
166 return err;
167}
168
169int enic_vnic_dev_deinit(struct enic *enic)
170{
171 int err;
172
173 spin_lock(&enic->devcmd_lock);
174 err = vnic_dev_deinit(enic->vdev);
175 spin_unlock(&enic->devcmd_lock);
176
177 return err;
178}
179
180int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
181{
182 int err;
183
184 spin_lock(&enic->devcmd_lock);
185 err = vnic_dev_init_prov(enic->vdev,
186 (u8 *)vp, vic_provinfo_size(vp));
187 spin_unlock(&enic->devcmd_lock);
188
189 return err;
190}
191
192int enic_dev_init_done(struct enic *enic, int *done, int *error)
193{
194 int err;
195
196 spin_lock(&enic->devcmd_lock);
197 err = vnic_dev_init_done(enic->vdev, done, error);
198 spin_unlock(&enic->devcmd_lock);
199
200 return err;
201}
202
203/* rtnl lock is held */
204void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
205{
206 struct enic *enic = netdev_priv(netdev);
207
208 spin_lock(&enic->devcmd_lock);
209 enic_add_vlan(enic, vid);
210 spin_unlock(&enic->devcmd_lock);
211}
212
213/* rtnl lock is held */
214void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
215{
216 struct enic *enic = netdev_priv(netdev);
217
218 spin_lock(&enic->devcmd_lock);
219 enic_del_vlan(enic, vid);
220 spin_unlock(&enic->devcmd_lock);
221}
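enic_dev.c collects the one-line devcmd wrappers that enic_main.c used to define inline: each firmware call is bracketed by enic->devcmd_lock so only one devcmd is outstanding at a time. The same locking rule could be expressed with a single helper, sketched below purely as an illustration (the real file keeps the explicit per-command wrappers; struct enic and struct vnic_dev are assumed in scope via enic.h and vnic_dev.h):

static int enic_devcmd_locked(struct enic *enic,
			      int (*cmd)(struct vnic_dev *vdev))
{
	int err;

	/* serialize all firmware devcmds issued through this vdev */
	spin_lock(&enic->devcmd_lock);
	err = cmd(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	return err;
}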
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 00000000000..495f57fcb88
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2011 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef _ENIC_DEV_H_
20#define _ENIC_DEV_H_
21
22int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
23int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
24int enic_dev_add_station_addr(struct enic *enic);
25int enic_dev_del_station_addr(struct enic *enic);
26int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
27 int broadcast, int promisc, int allmulti);
28int enic_dev_add_addr(struct enic *enic, u8 *addr);
29int enic_dev_del_addr(struct enic *enic, u8 *addr);
30void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
31void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
32int enic_dev_notify_unset(struct enic *enic);
33int enic_dev_hang_notify(struct enic *enic);
34int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
35int enic_dev_enable(struct enic *enic);
36int enic_dev_disable(struct enic *enic);
37int enic_vnic_dev_deinit(struct enic *enic);
38int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
39int enic_dev_init_done(struct enic *enic, int *done, int *error);
40
41#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c51fb..8b9cad5e971 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
44#include "vnic_vic.h" 44#include "vnic_vic.h"
45#include "enic_res.h" 45#include "enic_res.h"
46#include "enic.h" 46#include "enic.h"
47#include "enic_dev.h"
47 48
48#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) 49#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
49#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) 50#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
190 return 0; 191 return 0;
191} 192}
192 193
193static int enic_dev_fw_info(struct enic *enic,
194 struct vnic_devcmd_fw_info **fw_info)
195{
196 int err;
197
198 spin_lock(&enic->devcmd_lock);
199 err = vnic_dev_fw_info(enic->vdev, fw_info);
200 spin_unlock(&enic->devcmd_lock);
201
202 return err;
203}
204
205static void enic_get_drvinfo(struct net_device *netdev, 194static void enic_get_drvinfo(struct net_device *netdev,
206 struct ethtool_drvinfo *drvinfo) 195 struct ethtool_drvinfo *drvinfo)
207{ 196{
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
246 } 235 }
247} 236}
248 237
249static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
250{
251 int err;
252
253 spin_lock(&enic->devcmd_lock);
254 err = vnic_dev_stats_dump(enic->vdev, vstats);
255 spin_unlock(&enic->devcmd_lock);
256
257 return err;
258}
259
260static void enic_get_ethtool_stats(struct net_device *netdev, 238static void enic_get_ethtool_stats(struct net_device *netdev,
261 struct ethtool_stats *stats, u64 *data) 239 struct ethtool_stats *stats, u64 *data)
262{ 240{
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
896 return net_stats; 874 return net_stats;
897} 875}
898 876
899static void enic_reset_multicast_list(struct enic *enic) 877static void enic_reset_addr_lists(struct enic *enic)
900{ 878{
901 enic->mc_count = 0; 879 enic->mc_count = 0;
880 enic->uc_count = 0;
902 enic->flags = 0; 881 enic->flags = 0;
903} 882}
904 883
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
919 return 0; 898 return 0;
920} 899}
921 900
922static int enic_dev_add_station_addr(struct enic *enic)
923{
924 int err = 0;
925
926 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
927 spin_lock(&enic->devcmd_lock);
928 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
929 spin_unlock(&enic->devcmd_lock);
930 }
931
932 return err;
933}
934
935static int enic_dev_del_station_addr(struct enic *enic)
936{
937 int err = 0;
938
939 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
940 spin_lock(&enic->devcmd_lock);
941 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
942 spin_unlock(&enic->devcmd_lock);
943 }
944
945 return err;
946}
947
948static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) 901static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
949{ 902{
950 struct enic *enic = netdev_priv(netdev); 903 struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
989 return enic_dev_add_station_addr(enic); 942 return enic_dev_add_station_addr(enic);
990} 943}
991 944
992static int enic_dev_packet_filter(struct enic *enic, int directed, 945static void enic_update_multicast_addr_list(struct enic *enic)
993 int multicast, int broadcast, int promisc, int allmulti)
994{
995 int err;
996
997 spin_lock(&enic->devcmd_lock);
998 err = vnic_dev_packet_filter(enic->vdev, directed,
999 multicast, broadcast, promisc, allmulti);
1000 spin_unlock(&enic->devcmd_lock);
1001
1002 return err;
1003}
1004
1005static int enic_dev_add_addr(struct enic *enic, u8 *addr)
1006{
1007 int err;
1008
1009 spin_lock(&enic->devcmd_lock);
1010 err = vnic_dev_add_addr(enic->vdev, addr);
1011 spin_unlock(&enic->devcmd_lock);
1012
1013 return err;
1014}
1015
1016static int enic_dev_del_addr(struct enic *enic, u8 *addr)
1017{
1018 int err;
1019
1020 spin_lock(&enic->devcmd_lock);
1021 err = vnic_dev_del_addr(enic->vdev, addr);
1022 spin_unlock(&enic->devcmd_lock);
1023
1024 return err;
1025}
1026
1027static void enic_add_multicast_addr_list(struct enic *enic)
1028{ 946{
1029 struct net_device *netdev = enic->netdev; 947 struct net_device *netdev = enic->netdev;
1030 struct netdev_hw_addr *ha; 948 struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
1079 enic->mc_count = mc_count; 997 enic->mc_count = mc_count;
1080} 998}
1081 999
1082static void enic_add_unicast_addr_list(struct enic *enic) 1000static void enic_update_unicast_addr_list(struct enic *enic)
1083{ 1001{
1084 struct net_device *netdev = enic->netdev; 1002 struct net_device *netdev = enic->netdev;
1085 struct netdev_hw_addr *ha; 1003 struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
1156 } 1074 }
1157 1075
1158 if (!promisc) { 1076 if (!promisc) {
1159 enic_add_unicast_addr_list(enic); 1077 enic_update_unicast_addr_list(enic);
1160 if (!allmulti) 1078 if (!allmulti)
1161 enic_add_multicast_addr_list(enic); 1079 enic_update_multicast_addr_list(enic);
1162 } 1080 }
1163} 1081}
1164 1082
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
1170 enic->vlan_group = vlan_group; 1088 enic->vlan_group = vlan_group;
1171} 1089}
1172 1090
1173/* rtnl lock is held */
1174static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1175{
1176 struct enic *enic = netdev_priv(netdev);
1177
1178 spin_lock(&enic->devcmd_lock);
1179 enic_add_vlan(enic, vid);
1180 spin_unlock(&enic->devcmd_lock);
1181}
1182
1183/* rtnl lock is held */
1184static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1185{
1186 struct enic *enic = netdev_priv(netdev);
1187
1188 spin_lock(&enic->devcmd_lock);
1189 enic_del_vlan(enic, vid);
1190 spin_unlock(&enic->devcmd_lock);
1191}
1192
1193/* netif_tx_lock held, BHs disabled */ 1091/* netif_tx_lock held, BHs disabled */
1194static void enic_tx_timeout(struct net_device *netdev) 1092static void enic_tx_timeout(struct net_device *netdev)
1195{ 1093{
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
1197 schedule_work(&enic->reset); 1095 schedule_work(&enic->reset);
1198} 1096}
1199 1097
1200static int enic_vnic_dev_deinit(struct enic *enic)
1201{
1202 int err;
1203
1204 spin_lock(&enic->devcmd_lock);
1205 err = vnic_dev_deinit(enic->vdev);
1206 spin_unlock(&enic->devcmd_lock);
1207
1208 return err;
1209}
1210
1211static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
1212{
1213 int err;
1214
1215 spin_lock(&enic->devcmd_lock);
1216 err = vnic_dev_init_prov(enic->vdev,
1217 (u8 *)vp, vic_provinfo_size(vp));
1218 spin_unlock(&enic->devcmd_lock);
1219
1220 return err;
1221}
1222
1223static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1224{
1225 int err;
1226
1227 spin_lock(&enic->devcmd_lock);
1228 err = vnic_dev_init_done(enic->vdev, done, error);
1229 spin_unlock(&enic->devcmd_lock);
1230
1231 return err;
1232}
1233
1234static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) 1098static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1235{ 1099{
1236 struct enic *enic = netdev_priv(netdev); 1100 struct enic *enic = netdev_priv(netdev);
@@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1262 if (err) 1126 if (err)
1263 return err; 1127 return err;
1264 1128
1129 enic_reset_addr_lists(enic);
1130
1265 switch (enic->pp.request) { 1131 switch (enic->pp.request) {
1266 1132
1267 case PORT_REQUEST_ASSOCIATE: 1133 case PORT_REQUEST_ASSOCIATE:
@@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1318 vic_provinfo_free(vp); 1184 vic_provinfo_free(vp);
1319 if (err) 1185 if (err)
1320 return err; 1186 return err;
1321
1322 enic->pp.set |= ENIC_SET_APPLIED;
1323 break; 1187 break;
1324 1188
1325 case PORT_REQUEST_DISASSOCIATE: 1189 case PORT_REQUEST_DISASSOCIATE:
1326 enic->pp.set &= ~ENIC_SET_APPLIED;
1327 break; 1190 break;
1328 1191
1329 default: 1192 default:
1330 return -EINVAL; 1193 return -EINVAL;
1331 } 1194 }
1332 1195
1196 /* Set flag to indicate that the port assoc/disassoc
1197 * request has been sent out to fw
1198 */
1199 enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
1200
1333 return 0; 1201 return 0;
1334} 1202}
1335 1203
@@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1379 1247
1380 if (is_zero_ether_addr(netdev->dev_addr)) 1248 if (is_zero_ether_addr(netdev->dev_addr))
1381 random_ether_addr(netdev->dev_addr); 1249 random_ether_addr(netdev->dev_addr);
1382 } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
1383 if (!is_zero_ether_addr(enic->pp.mac_addr))
1384 enic_dev_del_addr(enic, enic->pp.mac_addr);
1385 } 1250 }
1386 1251
1387 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile)); 1252 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1390 if (err) 1255 if (err)
1391 goto set_port_profile_cleanup; 1256 goto set_port_profile_cleanup;
1392 1257
1393 if (!is_zero_ether_addr(enic->pp.mac_addr))
1394 enic_dev_add_addr(enic, enic->pp.mac_addr);
1395
1396set_port_profile_cleanup: 1258set_port_profile_cleanup:
1397 memset(enic->pp.vf_mac, 0, ETH_ALEN); 1259 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1398 1260
@@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1411 int err, error, done; 1273 int err, error, done;
1412 u16 response = PORT_PROFILE_RESPONSE_SUCCESS; 1274 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1413 1275
1414 if (!(enic->pp.set & ENIC_SET_APPLIED)) 1276 if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
1415 return -ENODATA; 1277 return -ENODATA;
1416 1278
1417 err = enic_dev_init_done(enic, &done, &error); 1279 err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
1489 return 0; 1351 return 0;
1490} 1352}
1491 1353
1492static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
1493{
1494 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
1495
1496 if (vnic_rq_posting_soon(rq)) {
1497
1498 /* SW workaround for A0 HW erratum: if we're just about
1499 * to write posted_index, insert a dummy desc
1500 * of type resvd
1501 */
1502
1503 rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
1504 vnic_rq_post(rq, 0, 0, 0, 0);
1505 } else {
1506 return enic_rq_alloc_buf(rq);
1507 }
1508
1509 return 0;
1510}
1511
1512static int enic_dev_hw_version(struct enic *enic,
1513 enum vnic_dev_hw_version *hw_ver)
1514{
1515 int err;
1516
1517 spin_lock(&enic->devcmd_lock);
1518 err = vnic_dev_hw_version(enic->vdev, hw_ver);
1519 spin_unlock(&enic->devcmd_lock);
1520
1521 return err;
1522}
1523
1524static int enic_set_rq_alloc_buf(struct enic *enic)
1525{
1526 enum vnic_dev_hw_version hw_ver;
1527 int err;
1528
1529 err = enic_dev_hw_version(enic, &hw_ver);
1530 if (err)
1531 return err;
1532
1533 switch (hw_ver) {
1534 case VNIC_DEV_HW_VER_A1:
1535 enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
1536 break;
1537 case VNIC_DEV_HW_VER_A2:
1538 case VNIC_DEV_HW_VER_UNKNOWN:
1539 enic->rq_alloc_buf = enic_rq_alloc_buf;
1540 break;
1541 default:
1542 return -ENODEV;
1543 }
1544
1545 return 0;
1546}
1547
1548static void enic_rq_indicate_buf(struct vnic_rq *rq, 1354static void enic_rq_indicate_buf(struct vnic_rq *rq,
1549 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 1355 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
1550 int skipped, void *opaque) 1356 int skipped, void *opaque)
@@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1681 0 /* don't unmask intr */, 1487 0 /* don't unmask intr */,
1682 0 /* don't reset intr timer */); 1488 0 /* don't reset intr timer */);
1683 1489
1684 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); 1490 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1685 1491
1686 /* Buffer allocation failed. Stay in polling 1492 /* Buffer allocation failed. Stay in polling
1687 * mode so we can try to fill the ring again. 1493 * mode so we can try to fill the ring again.
@@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1731 0 /* don't unmask intr */, 1537 0 /* don't unmask intr */,
1732 0 /* don't reset intr timer */); 1538 0 /* don't reset intr timer */);
1733 1539
1734 err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf); 1540 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
1735 1541
1736 /* Buffer allocation failed. Stay in polling mode 1542 /* Buffer allocation failed. Stay in polling mode
1737 * so we can try to fill the ring again. 1543 * so we can try to fill the ring again.
@@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic)
1901 return err; 1707 return err;
1902} 1708}
1903 1709
1904static int enic_dev_notify_unset(struct enic *enic)
1905{
1906 int err;
1907
1908 spin_lock(&enic->devcmd_lock);
1909 err = vnic_dev_notify_unset(enic->vdev);
1910 spin_unlock(&enic->devcmd_lock);
1911
1912 return err;
1913}
1914
1915static int enic_dev_enable(struct enic *enic)
1916{
1917 int err;
1918
1919 spin_lock(&enic->devcmd_lock);
1920 err = vnic_dev_enable_wait(enic->vdev);
1921 spin_unlock(&enic->devcmd_lock);
1922
1923 return err;
1924}
1925
1926static int enic_dev_disable(struct enic *enic)
1927{
1928 int err;
1929
1930 spin_lock(&enic->devcmd_lock);
1931 err = vnic_dev_disable(enic->vdev);
1932 spin_unlock(&enic->devcmd_lock);
1933
1934 return err;
1935}
1936
1937static void enic_notify_timer_start(struct enic *enic) 1710static void enic_notify_timer_start(struct enic *enic)
1938{ 1711{
1939 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1712 switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev)
1967 } 1740 }
1968 1741
1969 for (i = 0; i < enic->rq_count; i++) { 1742 for (i = 0; i < enic->rq_count; i++) {
1970 vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); 1743 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
1971 /* Need at least one buffer on ring to get going */ 1744 /* Need at least one buffer on ring to get going */
1972 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { 1745 if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
1973 netdev_err(netdev, "Unable to alloc receive buffers\n"); 1746 netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
2285 rss_hash_bits, rss_base_cpu, rss_enable); 2058 rss_hash_bits, rss_base_cpu, rss_enable);
2286} 2059}
2287 2060
2288static int enic_dev_hang_notify(struct enic *enic)
2289{
2290 int err;
2291
2292 spin_lock(&enic->devcmd_lock);
2293 err = vnic_dev_hang_notify(enic->vdev);
2294 spin_unlock(&enic->devcmd_lock);
2295
2296 return err;
2297}
2298
2299static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
2300{
2301 int err;
2302
2303 spin_lock(&enic->devcmd_lock);
2304 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
2305 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
2306 spin_unlock(&enic->devcmd_lock);
2307
2308 return err;
2309}
2310
2311static void enic_reset(struct work_struct *work) 2061static void enic_reset(struct work_struct *work)
2312{ 2062{
2313 struct enic *enic = container_of(work, struct enic, reset); 2063 struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work)
2320 enic_dev_hang_notify(enic); 2070 enic_dev_hang_notify(enic);
2321 enic_stop(enic->netdev); 2071 enic_stop(enic->netdev);
2322 enic_dev_hang_reset(enic); 2072 enic_dev_hang_reset(enic);
2323 enic_reset_multicast_list(enic); 2073 enic_reset_addr_lists(enic);
2324 enic_init_vnic_resources(enic); 2074 enic_init_vnic_resources(enic);
2325 enic_set_rss_nic_cfg(enic); 2075 enic_set_rss_nic_cfg(enic);
2326 enic_dev_set_ig_vlan_rewrite_mode(enic); 2076 enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work)
2332static int enic_set_intr_mode(struct enic *enic) 2082static int enic_set_intr_mode(struct enic *enic)
2333{ 2083{
2334 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); 2084 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2335 unsigned int m = 1; 2085 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
2336 unsigned int i; 2086 unsigned int i;
2337 2087
2338 /* Set interrupt mode (INTx, MSI, MSI-X) depending 2088 /* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2475 .ndo_tx_timeout = enic_tx_timeout, 2225 .ndo_tx_timeout = enic_tx_timeout,
2476 .ndo_set_vf_port = enic_set_vf_port, 2226 .ndo_set_vf_port = enic_set_vf_port,
2477 .ndo_get_vf_port = enic_get_vf_port, 2227 .ndo_get_vf_port = enic_get_vf_port,
2478#ifdef IFLA_VF_MAX
2479 .ndo_set_vf_mac = enic_set_vf_mac, 2228 .ndo_set_vf_mac = enic_set_vf_mac,
2480#endif
2481#ifdef CONFIG_NET_POLL_CONTROLLER 2229#ifdef CONFIG_NET_POLL_CONTROLLER
2482 .ndo_poll_controller = enic_poll_controller, 2230 .ndo_poll_controller = enic_poll_controller,
2483#endif 2231#endif
@@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic)
2556 2304
2557 enic_init_vnic_resources(enic); 2305 enic_init_vnic_resources(enic);
2558 2306
2559 err = enic_set_rq_alloc_buf(enic);
2560 if (err) {
2561 dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
2562 goto err_out_free_vnic_resources;
2563 }
2564
2565 err = enic_set_rss_nic_cfg(enic); 2307 err = enic_set_rss_nic_cfg(enic);
2566 if (err) { 2308 if (err) {
2567 dev_err(dev, "Failed to config nic, aborting\n"); 2309 dev_err(dev, "Failed to config nic, aborting\n");
2568 goto err_out_free_vnic_resources; 2310 goto err_out_free_vnic_resources;
2569 } 2311 }
2570 2312
2571 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2572 if (err) {
2573 dev_err(dev,
2574 "Failed to set ingress vlan rewrite mode, aborting.\n");
2575 goto err_out_free_vnic_resources;
2576 }
2577
2578 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2313 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2579 default: 2314 default:
2580 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); 2315 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2448,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2713 goto err_out_vnic_unregister; 2448 goto err_out_vnic_unregister;
2714 } 2449 }
2715 2450
2451 /* Setup devcmd lock
2452 */
2453
2454 spin_lock_init(&enic->devcmd_lock);
2455
2456 /*
2457 * Set ingress vlan rewrite mode before vnic initialization
2458 */
2459
2460 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2461 if (err) {
2462 dev_err(dev,
2463 "Failed to set ingress vlan rewrite mode, aborting.\n");
2464 goto err_out_dev_close;
2465 }
2466
2716 /* Issue device init to initialize the vnic-to-switch link. 2467 /* Issue device init to initialize the vnic-to-switch link.
2717 * We'll start with carrier off and wait for link UP 2468 * We'll start with carrier off and wait for link UP
2718 * notification later to turn on carrier. We don't need 2469 * notification later to turn on carrier. We don't need
@@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2736 } 2487 }
2737 } 2488 }
2738 2489
2739 /* Setup devcmd lock
2740 */
2741
2742 spin_lock_init(&enic->devcmd_lock);
2743
2744 err = enic_dev_init(enic); 2490 err = enic_dev_init(enic);
2745 if (err) { 2491 if (err) {
2746 dev_err(dev, "Device initialization failed, aborting\n"); 2492 dev_err(dev, "Device initialization failed, aborting\n");
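The two probe hunks above are a reordering: spin_lock_init(&enic->devcmd_lock) now runs just after vNIC registration, and the ingress-VLAN rewrite-mode setup is called there instead of inside enic_dev_init() (its removal from enic_dev_init() appears a few hunks up). The ordering matters if the helper keeps the shape it had before being moved out of this file, that is, it takes devcmd_lock around the firmware command. A sketch of that assumed shape, matching the body deleted earlier in this patch:

int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
{
	int err;

	/* must not run before spin_lock_init(&enic->devcmd_lock) in probe */
	spin_lock(&enic->devcmd_lock);
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
	spin_unlock(&enic->devcmd_lock);

	return err;
}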
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b1766..c489e72107d 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -419,25 +419,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
419 return err; 419 return err;
420} 420}
421 421
422int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
423{
424 struct vnic_devcmd_fw_info *fw_info;
425 int err;
426
427 err = vnic_dev_fw_info(vdev, &fw_info);
428 if (err)
429 return err;
430
431 if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
432 *hw_ver = VNIC_DEV_HW_VER_A1;
433 else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
434 *hw_ver = VNIC_DEV_HW_VER_A2;
435 else
436 *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
437
438 return 0;
439}
440
441int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 422int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
442 void *value) 423 void *value)
443{ 424{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24cd45..e837546213a 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
44#undef pr_fmt 44#undef pr_fmt
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46 46
47enum vnic_dev_hw_version {
48 VNIC_DEV_HW_VER_UNKNOWN,
49 VNIC_DEV_HW_VER_A1,
50 VNIC_DEV_HW_VER_A2,
51};
52
53enum vnic_dev_intr_mode { 47enum vnic_dev_intr_mode {
54 VNIC_DEV_INTR_MODE_UNKNOWN, 48 VNIC_DEV_INTR_MODE_UNKNOWN,
55 VNIC_DEV_INTR_MODE_INTX, 49 VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
93 u64 *a0, u64 *a1, int wait); 87 u64 *a0, u64 *a1, int wait);
94int vnic_dev_fw_info(struct vnic_dev *vdev, 88int vnic_dev_fw_info(struct vnic_dev *vdev,
95 struct vnic_devcmd_fw_info **fw_info); 89 struct vnic_devcmd_fw_info **fw_info);
96int vnic_dev_hw_version(struct vnic_dev *vdev,
97 enum vnic_dev_hw_version *hw_ver);
98int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 90int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
99 void *value); 91 void *value);
100int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); 92int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de2454..2056586f4d4 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
141 } 141 }
142} 142}
143 143
144static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
145{
146 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
147}
148
149static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) 144static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
150{ 145{
151 rq->ring.desc_avail += count; 146 rq->ring.desc_avail += count;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 0cb1cf9cf4b..a59cf961a43 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,8 @@
111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
112 */ 112 */
113 113
114#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115
114#include <linux/capability.h> 116#include <linux/capability.h>
115#include <linux/module.h> 117#include <linux/module.h>
116#include <linux/kernel.h> 118#include <linux/kernel.h>
@@ -162,7 +164,7 @@ static void eql_timer(unsigned long param)
162} 164}
163 165
164static const char version[] __initconst = 166static const char version[] __initconst =
165 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; 167 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
166 168
167static const struct net_device_ops eql_netdev_ops = { 169static const struct net_device_ops eql_netdev_ops = {
168 .ndo_open = eql_open, 170 .ndo_open = eql_open,
@@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev)
204 equalizer_t *eql = netdev_priv(dev); 206 equalizer_t *eql = netdev_priv(dev);
205 207
206 /* XXX We should force this off automatically for the user. */ 208 /* XXX We should force this off automatically for the user. */
207 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 209 netdev_info(dev,
208 "your slave devices.\n", dev->name); 210 "remember to turn off Van-Jacobson compression on your slave devices\n");
209 211
210 BUG_ON(!list_empty(&eql->queue.all_slaves)); 212 BUG_ON(!list_empty(&eql->queue.all_slaves));
211 213
@@ -591,7 +593,7 @@ static int __init eql_init_module(void)
591{ 593{
592 int err; 594 int err;
593 595
594 printk(version); 596 pr_info("%s\n", version);
595 597
596 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup); 598 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
597 if (!dev_eql) 599 if (!dev_eql)
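For context on the eql.c logging changes above: pr_info(fmt, ...) expands to printk(KERN_INFO pr_fmt(fmt), ...), so defining pr_fmt before the first include that pulls in printk.h prefixes every pr_* message from this file with the module name. A minimal sketch of the effect (the example function is illustrative, not from the patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before printk.h is included */
#include <linux/printk.h>

static void eql_log_example(void)
{
	/* expands to printk(KERN_INFO "eql" ": " "version loaded\n") */
	pr_info("version loaded\n");
}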
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cd0282d5d40..885d8baff7d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -54,7 +54,7 @@
54 54
55#include "fec.h" 55#include "fec.h"
56 56
57#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 57#if defined(CONFIG_ARM)
58#define FEC_ALIGNMENT 0xf 58#define FEC_ALIGNMENT 0xf
59#else 59#else
60#define FEC_ALIGNMENT 0x3 60#define FEC_ALIGNMENT 0x3
@@ -148,8 +148,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
148 * account when setting it. 148 * account when setting it.
149 */ 149 */
150#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 150#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
151 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 151 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
152 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
153#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 152#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
154#else 153#else
155#define OPT_FRAME_SIZE 0 154#define OPT_FRAME_SIZE 0
@@ -184,7 +183,7 @@ struct fec_enet_private {
184 struct bufdesc *rx_bd_base; 183 struct bufdesc *rx_bd_base;
185 struct bufdesc *tx_bd_base; 184 struct bufdesc *tx_bd_base;
186 /* The next free ring entry */ 185 /* The next free ring entry */
187 struct bufdesc *cur_rx, *cur_tx; 186 struct bufdesc *cur_rx, *cur_tx;
188 /* The ring entries to be free()ed */ 187 /* The ring entries to be free()ed */
189 struct bufdesc *dirty_tx; 188 struct bufdesc *dirty_tx;
190 189
@@ -192,28 +191,21 @@ struct fec_enet_private {
192 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ 191 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
193 spinlock_t hw_lock; 192 spinlock_t hw_lock;
194 193
195 struct platform_device *pdev; 194 struct platform_device *pdev;
196 195
197 int opened; 196 int opened;
198 197
199 /* Phylib and MDIO interface */ 198 /* Phylib and MDIO interface */
200 struct mii_bus *mii_bus; 199 struct mii_bus *mii_bus;
201 struct phy_device *phy_dev; 200 struct phy_device *phy_dev;
202 int mii_timeout; 201 int mii_timeout;
203 uint phy_speed; 202 uint phy_speed;
204 phy_interface_t phy_interface; 203 phy_interface_t phy_interface;
205 int link; 204 int link;
206 int full_duplex; 205 int full_duplex;
207 struct completion mdio_done; 206 struct completion mdio_done;
208}; 207};
209 208
210static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
211static void fec_enet_tx(struct net_device *dev);
212static void fec_enet_rx(struct net_device *dev);
213static int fec_enet_close(struct net_device *dev);
214static void fec_restart(struct net_device *dev, int duplex);
215static void fec_stop(struct net_device *dev);
216
217/* FEC MII MMFR bits definition */ 209/* FEC MII MMFR bits definition */
218#define FEC_MMFR_ST (1 << 30) 210#define FEC_MMFR_ST (1 << 30)
219#define FEC_MMFR_OP_READ (2 << 28) 211#define FEC_MMFR_OP_READ (2 << 28)
@@ -240,9 +232,9 @@ static void *swap_buffer(void *bufaddr, int len)
240} 232}
241 233
242static netdev_tx_t 234static netdev_tx_t
243fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 235fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
244{ 236{
245 struct fec_enet_private *fep = netdev_priv(dev); 237 struct fec_enet_private *fep = netdev_priv(ndev);
246 const struct platform_device_id *id_entry = 238 const struct platform_device_id *id_entry =
247 platform_get_device_id(fep->pdev); 239 platform_get_device_id(fep->pdev);
248 struct bufdesc *bdp; 240 struct bufdesc *bdp;
@@ -263,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
263 255
264 if (status & BD_ENET_TX_READY) { 256 if (status & BD_ENET_TX_READY) {
265 /* Ooops. All transmit buffers are full. Bail out. 257 /* Ooops. All transmit buffers are full. Bail out.
266 * This should not happen, since dev->tbusy should be set. 258 * This should not happen, since ndev->tbusy should be set.
267 */ 259 */
268 printk("%s: tx queue full!.\n", dev->name); 260 printk("%s: tx queue full!.\n", ndev->name);
269 spin_unlock_irqrestore(&fep->hw_lock, flags); 261 spin_unlock_irqrestore(&fep->hw_lock, flags);
270 return NETDEV_TX_BUSY; 262 return NETDEV_TX_BUSY;
271 } 263 }
@@ -285,7 +277,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
285 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { 277 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
286 unsigned int index; 278 unsigned int index;
287 index = bdp - fep->tx_bd_base; 279 index = bdp - fep->tx_bd_base;
288 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); 280 memcpy(fep->tx_bounce[index], skb->data, skb->len);
289 bufaddr = fep->tx_bounce[index]; 281 bufaddr = fep->tx_bounce[index];
290 } 282 }
291 283
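The hunk above is where the alignment quirk matters: with FEC_ALIGNMENT now 0xf on all ARM builds (see the CONFIG_ARM change near the top of this file's diff), any skb whose data pointer is not 16-byte aligned is copied into the per-descriptor tx_bounce buffer before DMA mapping. A small sketch of the test itself, as it would read inline in fec.c (the helper name is illustrative, not in the driver):

static inline bool fec_tx_needs_bounce(const void *bufaddr)
{
	/* low nibble set means not 16-byte aligned, so copy to tx_bounce[] */
	return ((unsigned long)bufaddr & FEC_ALIGNMENT) != 0;
}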
@@ -300,13 +292,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
300 /* Save skb pointer */ 292 /* Save skb pointer */
301 fep->tx_skbuff[fep->skb_cur] = skb; 293 fep->tx_skbuff[fep->skb_cur] = skb;
302 294
303 dev->stats.tx_bytes += skb->len; 295 ndev->stats.tx_bytes += skb->len;
304 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; 296 fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
305 297
306 /* Push the data cache so the CPM does not get stale memory 298 /* Push the data cache so the CPM does not get stale memory
307 * data. 299 * data.
308 */ 300 */
309 bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr, 301 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
310 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 302 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
311 303
312 /* Send it on its way. Tell FEC it's ready, interrupt when done, 304 /* Send it on its way. Tell FEC it's ready, interrupt when done,
@@ -327,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
327 319
328 if (bdp == fep->dirty_tx) { 320 if (bdp == fep->dirty_tx) {
329 fep->tx_full = 1; 321 fep->tx_full = 1;
330 netif_stop_queue(dev); 322 netif_stop_queue(ndev);
331 } 323 }
332 324
333 fep->cur_tx = bdp; 325 fep->cur_tx = bdp;
@@ -337,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
337 return NETDEV_TX_OK; 329 return NETDEV_TX_OK;
338} 330}
339 331
332/* This function is called to start or restart the FEC during a link
333 * change. This only happens when switching between half and full
334 * duplex.
335 */
340static void 336static void
341fec_timeout(struct net_device *dev) 337fec_restart(struct net_device *ndev, int duplex)
342{ 338{
343 struct fec_enet_private *fep = netdev_priv(dev); 339 struct fec_enet_private *fep = netdev_priv(ndev);
340 const struct platform_device_id *id_entry =
341 platform_get_device_id(fep->pdev);
342 int i;
343 u32 temp_mac[2];
344 u32 rcntl = OPT_FRAME_SIZE | 0x04;
344 345
345 dev->stats.tx_errors++; 346 /* Whack a reset. We should wait for this. */
347 writel(1, fep->hwp + FEC_ECNTRL);
348 udelay(10);
346 349
347 fec_restart(dev, fep->full_duplex); 350 /*
348 netif_wake_queue(dev); 351 * enet-mac reset will reset mac address registers too,
349} 352 * so need to reconfigure it.
353 */
354 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
355 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
356 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
357 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
358 }
350 359
351static irqreturn_t 360 /* Clear any outstanding interrupt. */
352fec_enet_interrupt(int irq, void * dev_id) 361 writel(0xffc00000, fep->hwp + FEC_IEVENT);
353{
354 struct net_device *dev = dev_id;
355 struct fec_enet_private *fep = netdev_priv(dev);
356 uint int_events;
357 irqreturn_t ret = IRQ_NONE;
358 362
359 do { 363 /* Reset all multicast. */
360 int_events = readl(fep->hwp + FEC_IEVENT); 364 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
361 writel(int_events, fep->hwp + FEC_IEVENT); 365 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
366#ifndef CONFIG_M5272
367 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
368 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
369#endif
362 370
363 if (int_events & FEC_ENET_RXF) { 371 /* Set maximum receive buffer size. */
364 ret = IRQ_HANDLED; 372 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
365 fec_enet_rx(dev);
366 }
367 373
368 /* Transmit OK, or non-fatal error. Update the buffer 374 /* Set receive and transmit descriptor base. */
369 * descriptors. FEC handles all errors, we just discover 375 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
370 * them as part of the transmit process. 376 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
371 */ 377 fep->hwp + FEC_X_DES_START);
372 if (int_events & FEC_ENET_TXF) { 378
373 ret = IRQ_HANDLED; 379 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
374 fec_enet_tx(dev); 380 fep->cur_rx = fep->rx_bd_base;
381
382 /* Reset SKB transmit buffers. */
383 fep->skb_cur = fep->skb_dirty = 0;
384 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
385 if (fep->tx_skbuff[i]) {
386 dev_kfree_skb_any(fep->tx_skbuff[i]);
387 fep->tx_skbuff[i] = NULL;
375 } 388 }
389 }
376 390
377 if (int_events & FEC_ENET_MII) { 391 /* Enable MII mode */
378 ret = IRQ_HANDLED; 392 if (duplex) {
379 complete(&fep->mdio_done); 393 /* FD enable */
394 writel(0x04, fep->hwp + FEC_X_CNTRL);
395 } else {
396 /* No Rcv on Xmit */
397 rcntl |= 0x02;
398 writel(0x0, fep->hwp + FEC_X_CNTRL);
399 }
400
401 fep->full_duplex = duplex;
402
403 /* Set MII speed */
404 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
405
406 /*
407 * The phy interface and speed need to get configured
408 * differently on enet-mac.
409 */
410 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
411 /* Enable flow control and length check */
412 rcntl |= 0x40000000 | 0x00000020;
413
414 /* MII or RMII */
415 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
416 rcntl |= (1 << 8);
417 else
418 rcntl &= ~(1 << 8);
419
420 /* 10M or 100M */
421 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
422 rcntl &= ~(1 << 9);
423 else
424 rcntl |= (1 << 9);
425
426 } else {
427#ifdef FEC_MIIGSK_ENR
428 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
429 /* disable the gasket and wait */
430 writel(0, fep->hwp + FEC_MIIGSK_ENR);
431 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
432 udelay(1);
433
434 /*
435 * configure the gasket:
436 * RMII, 50 MHz, no loopback, no echo
437 */
438 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
439
440 /* re-enable the gasket */
441 writel(2, fep->hwp + FEC_MIIGSK_ENR);
380 } 442 }
381 } while (int_events); 443#endif
444 }
445 writel(rcntl, fep->hwp + FEC_R_CNTRL);
382 446
383 return ret; 447 /* And last, enable the transmit and receive processing */
448 writel(2, fep->hwp + FEC_ECNTRL);
449 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
450
451 /* Enable interrupts we wish to service */
452 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
453}
454
455static void
456fec_stop(struct net_device *ndev)
457{
458 struct fec_enet_private *fep = netdev_priv(ndev);
459
460 /* We cannot expect a graceful transmit stop without link !!! */
461 if (fep->link) {
462 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
463 udelay(10);
464 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
465 printk("fec_stop : Graceful transmit stop did not complete !\n");
466 }
467
468 /* Whack a reset. We should wait for this. */
469 writel(1, fep->hwp + FEC_ECNTRL);
470 udelay(10);
471 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
472 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
384} 473}
385 474
386 475
387static void 476static void
388fec_enet_tx(struct net_device *dev) 477fec_timeout(struct net_device *ndev)
478{
479 struct fec_enet_private *fep = netdev_priv(ndev);
480
481 ndev->stats.tx_errors++;
482
483 fec_restart(ndev, fep->full_duplex);
484 netif_wake_queue(ndev);
485}
486
487static void
488fec_enet_tx(struct net_device *ndev)
389{ 489{
390 struct fec_enet_private *fep; 490 struct fec_enet_private *fep;
391 struct bufdesc *bdp; 491 struct bufdesc *bdp;
392 unsigned short status; 492 unsigned short status;
393 struct sk_buff *skb; 493 struct sk_buff *skb;
394 494
395 fep = netdev_priv(dev); 495 fep = netdev_priv(ndev);
396 spin_lock(&fep->hw_lock); 496 spin_lock(&fep->hw_lock);
397 bdp = fep->dirty_tx; 497 bdp = fep->dirty_tx;
398 498
@@ -400,7 +500,8 @@ fec_enet_tx(struct net_device *dev)
400 if (bdp == fep->cur_tx && fep->tx_full == 0) 500 if (bdp == fep->cur_tx && fep->tx_full == 0)
401 break; 501 break;
402 502
403 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 503 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
504 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
404 bdp->cbd_bufaddr = 0; 505 bdp->cbd_bufaddr = 0;
405 506
406 skb = fep->tx_skbuff[fep->skb_dirty]; 507 skb = fep->tx_skbuff[fep->skb_dirty];
@@ -408,19 +509,19 @@ fec_enet_tx(struct net_device *dev)
408 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 509 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
409 BD_ENET_TX_RL | BD_ENET_TX_UN | 510 BD_ENET_TX_RL | BD_ENET_TX_UN |
410 BD_ENET_TX_CSL)) { 511 BD_ENET_TX_CSL)) {
411 dev->stats.tx_errors++; 512 ndev->stats.tx_errors++;
412 if (status & BD_ENET_TX_HB) /* No heartbeat */ 513 if (status & BD_ENET_TX_HB) /* No heartbeat */
413 dev->stats.tx_heartbeat_errors++; 514 ndev->stats.tx_heartbeat_errors++;
414 if (status & BD_ENET_TX_LC) /* Late collision */ 515 if (status & BD_ENET_TX_LC) /* Late collision */
415 dev->stats.tx_window_errors++; 516 ndev->stats.tx_window_errors++;
416 if (status & BD_ENET_TX_RL) /* Retrans limit */ 517 if (status & BD_ENET_TX_RL) /* Retrans limit */
417 dev->stats.tx_aborted_errors++; 518 ndev->stats.tx_aborted_errors++;
418 if (status & BD_ENET_TX_UN) /* Underrun */ 519 if (status & BD_ENET_TX_UN) /* Underrun */
419 dev->stats.tx_fifo_errors++; 520 ndev->stats.tx_fifo_errors++;
420 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 521 if (status & BD_ENET_TX_CSL) /* Carrier lost */
421 dev->stats.tx_carrier_errors++; 522 ndev->stats.tx_carrier_errors++;
422 } else { 523 } else {
423 dev->stats.tx_packets++; 524 ndev->stats.tx_packets++;
424 } 525 }
425 526
426 if (status & BD_ENET_TX_READY) 527 if (status & BD_ENET_TX_READY)
@@ -430,7 +531,7 @@ fec_enet_tx(struct net_device *dev)
430 * but we eventually sent the packet OK. 531 * but we eventually sent the packet OK.
431 */ 532 */
432 if (status & BD_ENET_TX_DEF) 533 if (status & BD_ENET_TX_DEF)
433 dev->stats.collisions++; 534 ndev->stats.collisions++;
434 535
435 /* Free the sk buffer associated with this last transmit */ 536 /* Free the sk buffer associated with this last transmit */
436 dev_kfree_skb_any(skb); 537 dev_kfree_skb_any(skb);
@@ -447,8 +548,8 @@ fec_enet_tx(struct net_device *dev)
447 */ 548 */
448 if (fep->tx_full) { 549 if (fep->tx_full) {
449 fep->tx_full = 0; 550 fep->tx_full = 0;
450 if (netif_queue_stopped(dev)) 551 if (netif_queue_stopped(ndev))
451 netif_wake_queue(dev); 552 netif_wake_queue(ndev);
452 } 553 }
453 } 554 }
454 fep->dirty_tx = bdp; 555 fep->dirty_tx = bdp;
@@ -462,9 +563,9 @@ fec_enet_tx(struct net_device *dev)
462 * effectively tossing the packet. 563 * effectively tossing the packet.
463 */ 564 */
464static void 565static void
465fec_enet_rx(struct net_device *dev) 566fec_enet_rx(struct net_device *ndev)
466{ 567{
467 struct fec_enet_private *fep = netdev_priv(dev); 568 struct fec_enet_private *fep = netdev_priv(ndev);
468 const struct platform_device_id *id_entry = 569 const struct platform_device_id *id_entry =
469 platform_get_device_id(fep->pdev); 570 platform_get_device_id(fep->pdev);
470 struct bufdesc *bdp; 571 struct bufdesc *bdp;
@@ -498,17 +599,17 @@ fec_enet_rx(struct net_device *dev)
498 /* Check for errors. */ 599 /* Check for errors. */
499 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 600 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
500 BD_ENET_RX_CR | BD_ENET_RX_OV)) { 601 BD_ENET_RX_CR | BD_ENET_RX_OV)) {
501 dev->stats.rx_errors++; 602 ndev->stats.rx_errors++;
502 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { 603 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
503 /* Frame too long or too short. */ 604 /* Frame too long or too short. */
504 dev->stats.rx_length_errors++; 605 ndev->stats.rx_length_errors++;
505 } 606 }
506 if (status & BD_ENET_RX_NO) /* Frame alignment */ 607 if (status & BD_ENET_RX_NO) /* Frame alignment */
507 dev->stats.rx_frame_errors++; 608 ndev->stats.rx_frame_errors++;
508 if (status & BD_ENET_RX_CR) /* CRC Error */ 609 if (status & BD_ENET_RX_CR) /* CRC Error */
509 dev->stats.rx_crc_errors++; 610 ndev->stats.rx_crc_errors++;
510 if (status & BD_ENET_RX_OV) /* FIFO overrun */ 611 if (status & BD_ENET_RX_OV) /* FIFO overrun */
511 dev->stats.rx_fifo_errors++; 612 ndev->stats.rx_fifo_errors++;
512 } 613 }
513 614
514 /* Report late collisions as a frame error. 615 /* Report late collisions as a frame error.
@@ -516,19 +617,19 @@ fec_enet_rx(struct net_device *dev)
516 * have in the buffer. So, just drop this frame on the floor. 617 * have in the buffer. So, just drop this frame on the floor.
517 */ 618 */
518 if (status & BD_ENET_RX_CL) { 619 if (status & BD_ENET_RX_CL) {
519 dev->stats.rx_errors++; 620 ndev->stats.rx_errors++;
520 dev->stats.rx_frame_errors++; 621 ndev->stats.rx_frame_errors++;
521 goto rx_processing_done; 622 goto rx_processing_done;
522 } 623 }
523 624
524 /* Process the incoming frame. */ 625 /* Process the incoming frame. */
525 dev->stats.rx_packets++; 626 ndev->stats.rx_packets++;
526 pkt_len = bdp->cbd_datlen; 627 pkt_len = bdp->cbd_datlen;
527 dev->stats.rx_bytes += pkt_len; 628 ndev->stats.rx_bytes += pkt_len;
528 data = (__u8*)__va(bdp->cbd_bufaddr); 629 data = (__u8*)__va(bdp->cbd_bufaddr);
529 630
530 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, 631 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
531 DMA_FROM_DEVICE); 632 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
532 633
533 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 634 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
534 swap_buffer(data, pkt_len); 635 swap_buffer(data, pkt_len);
@@ -542,18 +643,18 @@ fec_enet_rx(struct net_device *dev)
542 643
543 if (unlikely(!skb)) { 644 if (unlikely(!skb)) {
544 printk("%s: Memory squeeze, dropping packet.\n", 645 printk("%s: Memory squeeze, dropping packet.\n",
545 dev->name); 646 ndev->name);
546 dev->stats.rx_dropped++; 647 ndev->stats.rx_dropped++;
547 } else { 648 } else {
548 skb_reserve(skb, NET_IP_ALIGN); 649 skb_reserve(skb, NET_IP_ALIGN);
549 skb_put(skb, pkt_len - 4); /* Make room */ 650 skb_put(skb, pkt_len - 4); /* Make room */
550 skb_copy_to_linear_data(skb, data, pkt_len - 4); 651 skb_copy_to_linear_data(skb, data, pkt_len - 4);
551 skb->protocol = eth_type_trans(skb, dev); 652 skb->protocol = eth_type_trans(skb, ndev);
552 netif_rx(skb); 653 netif_rx(skb);
553 } 654 }
554 655
555 bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen, 656 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
556 DMA_FROM_DEVICE); 657 FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
557rx_processing_done: 658rx_processing_done:
558 /* Clear the status flags for this buffer */ 659 /* Clear the status flags for this buffer */
559 status &= ~BD_ENET_RX_STATS; 660 status &= ~BD_ENET_RX_STATS;
@@ -578,10 +679,47 @@ rx_processing_done:
578 spin_unlock(&fep->hw_lock); 679 spin_unlock(&fep->hw_lock);
579} 680}
580 681
682static irqreturn_t
683fec_enet_interrupt(int irq, void *dev_id)
684{
685 struct net_device *ndev = dev_id;
686 struct fec_enet_private *fep = netdev_priv(ndev);
687 uint int_events;
688 irqreturn_t ret = IRQ_NONE;
689
690 do {
691 int_events = readl(fep->hwp + FEC_IEVENT);
692 writel(int_events, fep->hwp + FEC_IEVENT);
693
694 if (int_events & FEC_ENET_RXF) {
695 ret = IRQ_HANDLED;
696 fec_enet_rx(ndev);
697 }
698
699 /* Transmit OK, or non-fatal error. Update the buffer
700 * descriptors. FEC handles all errors, we just discover
701 * them as part of the transmit process.
702 */
703 if (int_events & FEC_ENET_TXF) {
704 ret = IRQ_HANDLED;
705 fec_enet_tx(ndev);
706 }
707
708 if (int_events & FEC_ENET_MII) {
709 ret = IRQ_HANDLED;
710 complete(&fep->mdio_done);
711 }
712 } while (int_events);
713
714 return ret;
715}
716
717
718
581/* ------------------------------------------------------------------------- */ 719/* ------------------------------------------------------------------------- */
582static void __inline__ fec_get_mac(struct net_device *dev) 720static void __inline__ fec_get_mac(struct net_device *ndev)
583{ 721{
584 struct fec_enet_private *fep = netdev_priv(dev); 722 struct fec_enet_private *fep = netdev_priv(ndev);
585 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 723 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
586 unsigned char *iap, tmpaddr[ETH_ALEN]; 724 unsigned char *iap, tmpaddr[ETH_ALEN];
587 725
@@ -617,11 +755,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
617 iap = &tmpaddr[0]; 755 iap = &tmpaddr[0];
618 } 756 }
619 757
620 memcpy(dev->dev_addr, iap, ETH_ALEN); 758 memcpy(ndev->dev_addr, iap, ETH_ALEN);
621 759
622 /* Adjust MAC if using macaddr */ 760 /* Adjust MAC if using macaddr */
623 if (iap == macaddr) 761 if (iap == macaddr)
624 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; 762 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
625} 763}
626 764
627/* ------------------------------------------------------------------------- */ 765/* ------------------------------------------------------------------------- */
@@ -629,9 +767,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
629/* 767/*
630 * Phy section 768 * Phy section
631 */ 769 */
632static void fec_enet_adjust_link(struct net_device *dev) 770static void fec_enet_adjust_link(struct net_device *ndev)
633{ 771{
634 struct fec_enet_private *fep = netdev_priv(dev); 772 struct fec_enet_private *fep = netdev_priv(ndev);
635 struct phy_device *phy_dev = fep->phy_dev; 773 struct phy_device *phy_dev = fep->phy_dev;
636 unsigned long flags; 774 unsigned long flags;
637 775
@@ -648,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
648 /* Duplex link change */ 786 /* Duplex link change */
649 if (phy_dev->link) { 787 if (phy_dev->link) {
650 if (fep->full_duplex != phy_dev->duplex) { 788 if (fep->full_duplex != phy_dev->duplex) {
651 fec_restart(dev, phy_dev->duplex); 789 fec_restart(ndev, phy_dev->duplex);
652 status_change = 1; 790 status_change = 1;
653 } 791 }
654 } 792 }
@@ -657,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
657 if (phy_dev->link != fep->link) { 795 if (phy_dev->link != fep->link) {
658 fep->link = phy_dev->link; 796 fep->link = phy_dev->link;
659 if (phy_dev->link) 797 if (phy_dev->link)
660 fec_restart(dev, phy_dev->duplex); 798 fec_restart(ndev, phy_dev->duplex);
661 else 799 else
662 fec_stop(dev); 800 fec_stop(ndev);
663 status_change = 1; 801 status_change = 1;
664 } 802 }
665 803
@@ -728,9 +866,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
728 return 0; 866 return 0;
729} 867}
730 868
731static int fec_enet_mii_probe(struct net_device *dev) 869static int fec_enet_mii_probe(struct net_device *ndev)
732{ 870{
733 struct fec_enet_private *fep = netdev_priv(dev); 871 struct fec_enet_private *fep = netdev_priv(ndev);
734 struct phy_device *phy_dev = NULL; 872 struct phy_device *phy_dev = NULL;
735 char mdio_bus_id[MII_BUS_ID_SIZE]; 873 char mdio_bus_id[MII_BUS_ID_SIZE];
736 char phy_name[MII_BUS_ID_SIZE + 3]; 874 char phy_name[MII_BUS_ID_SIZE + 3];
@@ -755,16 +893,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
755 893
756 if (phy_id >= PHY_MAX_ADDR) { 894 if (phy_id >= PHY_MAX_ADDR) {
757 printk(KERN_INFO "%s: no PHY, assuming direct connection " 895 printk(KERN_INFO "%s: no PHY, assuming direct connection "
758 "to switch\n", dev->name); 896 "to switch\n", ndev->name);
759 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); 897 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
760 phy_id = 0; 898 phy_id = 0;
761 } 899 }
762 900
763 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 901 snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
764 phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0, 902 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
765 PHY_INTERFACE_MODE_MII); 903 PHY_INTERFACE_MODE_MII);
766 if (IS_ERR(phy_dev)) { 904 if (IS_ERR(phy_dev)) {
767 printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); 905 printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
768 return PTR_ERR(phy_dev); 906 return PTR_ERR(phy_dev);
769 } 907 }
770 908
@@ -777,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
777 fep->full_duplex = 0; 915 fep->full_duplex = 0;
778 916
779 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " 917 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
780 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, 918 "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
781 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), 919 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
782 fep->phy_dev->irq); 920 fep->phy_dev->irq);
783 921
@@ -787,8 +925,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
787static int fec_enet_mii_init(struct platform_device *pdev) 925static int fec_enet_mii_init(struct platform_device *pdev)
788{ 926{
789 static struct mii_bus *fec0_mii_bus; 927 static struct mii_bus *fec0_mii_bus;
790 struct net_device *dev = platform_get_drvdata(pdev); 928 struct net_device *ndev = platform_get_drvdata(pdev);
791 struct fec_enet_private *fep = netdev_priv(dev); 929 struct fec_enet_private *fep = netdev_priv(ndev);
792 const struct platform_device_id *id_entry = 930 const struct platform_device_id *id_entry =
793 platform_get_device_id(fep->pdev); 931 platform_get_device_id(fep->pdev);
794 int err = -ENXIO, i; 932 int err = -ENXIO, i;
@@ -846,8 +984,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
846 for (i = 0; i < PHY_MAX_ADDR; i++) 984 for (i = 0; i < PHY_MAX_ADDR; i++)
847 fep->mii_bus->irq[i] = PHY_POLL; 985 fep->mii_bus->irq[i] = PHY_POLL;
848 986
849 platform_set_drvdata(dev, fep->mii_bus);
850
851 if (mdiobus_register(fep->mii_bus)) 987 if (mdiobus_register(fep->mii_bus))
852 goto err_out_free_mdio_irq; 988 goto err_out_free_mdio_irq;
853 989
@@ -874,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
874 mdiobus_free(fep->mii_bus); 1010 mdiobus_free(fep->mii_bus);
875} 1011}
876 1012
877static int fec_enet_get_settings(struct net_device *dev, 1013static int fec_enet_get_settings(struct net_device *ndev,
878 struct ethtool_cmd *cmd) 1014 struct ethtool_cmd *cmd)
879{ 1015{
880 struct fec_enet_private *fep = netdev_priv(dev); 1016 struct fec_enet_private *fep = netdev_priv(ndev);
881 struct phy_device *phydev = fep->phy_dev; 1017 struct phy_device *phydev = fep->phy_dev;
882 1018
883 if (!phydev) 1019 if (!phydev)
@@ -886,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev,
886 return phy_ethtool_gset(phydev, cmd); 1022 return phy_ethtool_gset(phydev, cmd);
887} 1023}
888 1024
889static int fec_enet_set_settings(struct net_device *dev, 1025static int fec_enet_set_settings(struct net_device *ndev,
890 struct ethtool_cmd *cmd) 1026 struct ethtool_cmd *cmd)
891{ 1027{
892 struct fec_enet_private *fep = netdev_priv(dev); 1028 struct fec_enet_private *fep = netdev_priv(ndev);
893 struct phy_device *phydev = fep->phy_dev; 1029 struct phy_device *phydev = fep->phy_dev;
894 1030
895 if (!phydev) 1031 if (!phydev)
@@ -898,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev,
898 return phy_ethtool_sset(phydev, cmd); 1034 return phy_ethtool_sset(phydev, cmd);
899} 1035}
900 1036
901static void fec_enet_get_drvinfo(struct net_device *dev, 1037static void fec_enet_get_drvinfo(struct net_device *ndev,
902 struct ethtool_drvinfo *info) 1038 struct ethtool_drvinfo *info)
903{ 1039{
904 struct fec_enet_private *fep = netdev_priv(dev); 1040 struct fec_enet_private *fep = netdev_priv(ndev);
905 1041
906 strcpy(info->driver, fep->pdev->dev.driver->name); 1042 strcpy(info->driver, fep->pdev->dev.driver->name);
907 strcpy(info->version, "Revision: 1.0"); 1043 strcpy(info->version, "Revision: 1.0");
908 strcpy(info->bus_info, dev_name(&dev->dev)); 1044 strcpy(info->bus_info, dev_name(&ndev->dev));
909} 1045}
910 1046
911static struct ethtool_ops fec_enet_ethtool_ops = { 1047static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -915,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
915 .get_link = ethtool_op_get_link, 1051 .get_link = ethtool_op_get_link,
916}; 1052};
917 1053
918static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1054static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
919{ 1055{
920 struct fec_enet_private *fep = netdev_priv(dev); 1056 struct fec_enet_private *fep = netdev_priv(ndev);
921 struct phy_device *phydev = fep->phy_dev; 1057 struct phy_device *phydev = fep->phy_dev;
922 1058
923 if (!netif_running(dev)) 1059 if (!netif_running(ndev))
924 return -EINVAL; 1060 return -EINVAL;
925 1061
926 if (!phydev) 1062 if (!phydev)
@@ -929,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
929 return phy_mii_ioctl(phydev, rq, cmd); 1065 return phy_mii_ioctl(phydev, rq, cmd);
930} 1066}
931 1067
932static void fec_enet_free_buffers(struct net_device *dev) 1068static void fec_enet_free_buffers(struct net_device *ndev)
933{ 1069{
934 struct fec_enet_private *fep = netdev_priv(dev); 1070 struct fec_enet_private *fep = netdev_priv(ndev);
935 int i; 1071 int i;
936 struct sk_buff *skb; 1072 struct sk_buff *skb;
937 struct bufdesc *bdp; 1073 struct bufdesc *bdp;
@@ -941,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
941 skb = fep->rx_skbuff[i]; 1077 skb = fep->rx_skbuff[i];
942 1078
943 if (bdp->cbd_bufaddr) 1079 if (bdp->cbd_bufaddr)
944 dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, 1080 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
945 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1081 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
946 if (skb) 1082 if (skb)
947 dev_kfree_skb(skb); 1083 dev_kfree_skb(skb);
@@ -953,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
953 kfree(fep->tx_bounce[i]); 1089 kfree(fep->tx_bounce[i]);
954} 1090}
955 1091
956static int fec_enet_alloc_buffers(struct net_device *dev) 1092static int fec_enet_alloc_buffers(struct net_device *ndev)
957{ 1093{
958 struct fec_enet_private *fep = netdev_priv(dev); 1094 struct fec_enet_private *fep = netdev_priv(ndev);
959 int i; 1095 int i;
960 struct sk_buff *skb; 1096 struct sk_buff *skb;
961 struct bufdesc *bdp; 1097 struct bufdesc *bdp;
@@ -964,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
964 for (i = 0; i < RX_RING_SIZE; i++) { 1100 for (i = 0; i < RX_RING_SIZE; i++) {
965 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); 1101 skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
966 if (!skb) { 1102 if (!skb) {
967 fec_enet_free_buffers(dev); 1103 fec_enet_free_buffers(ndev);
968 return -ENOMEM; 1104 return -ENOMEM;
969 } 1105 }
970 fep->rx_skbuff[i] = skb; 1106 fep->rx_skbuff[i] = skb;
971 1107
972 bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, 1108 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
973 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1109 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
974 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1110 bdp->cbd_sc = BD_ENET_RX_EMPTY;
975 bdp++; 1111 bdp++;
@@ -996,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
996} 1132}
997 1133
998static int 1134static int
999fec_enet_open(struct net_device *dev) 1135fec_enet_open(struct net_device *ndev)
1000{ 1136{
1001 struct fec_enet_private *fep = netdev_priv(dev); 1137 struct fec_enet_private *fep = netdev_priv(ndev);
1002 int ret; 1138 int ret;
1003 1139
1004 /* I should reset the ring buffers here, but I don't yet know 1140 /* I should reset the ring buffers here, but I don't yet know
1005 * a simple way to do that. 1141 * a simple way to do that.
1006 */ 1142 */
1007 1143
1008 ret = fec_enet_alloc_buffers(dev); 1144 ret = fec_enet_alloc_buffers(ndev);
1009 if (ret) 1145 if (ret)
1010 return ret; 1146 return ret;
1011 1147
1012 /* Probe and connect to PHY when open the interface */ 1148 /* Probe and connect to PHY when open the interface */
1013 ret = fec_enet_mii_probe(dev); 1149 ret = fec_enet_mii_probe(ndev);
1014 if (ret) { 1150 if (ret) {
1015 fec_enet_free_buffers(dev); 1151 fec_enet_free_buffers(ndev);
1016 return ret; 1152 return ret;
1017 } 1153 }
1018 phy_start(fep->phy_dev); 1154 phy_start(fep->phy_dev);
1019 netif_start_queue(dev); 1155 netif_start_queue(ndev);
1020 fep->opened = 1; 1156 fep->opened = 1;
1021 return 0; 1157 return 0;
1022} 1158}
1023 1159
1024static int 1160static int
1025fec_enet_close(struct net_device *dev) 1161fec_enet_close(struct net_device *ndev)
1026{ 1162{
1027 struct fec_enet_private *fep = netdev_priv(dev); 1163 struct fec_enet_private *fep = netdev_priv(ndev);
1028 1164
1029 /* Don't know what to do yet. */ 1165 /* Don't know what to do yet. */
1030 fep->opened = 0; 1166 fep->opened = 0;
1031 netif_stop_queue(dev); 1167 netif_stop_queue(ndev);
1032 fec_stop(dev); 1168 fec_stop(ndev);
1033 1169
1034 if (fep->phy_dev) 1170 if (fep->phy_dev) {
1171 phy_stop(fep->phy_dev);
1035 phy_disconnect(fep->phy_dev); 1172 phy_disconnect(fep->phy_dev);
1173 }
1036 1174
1037 fec_enet_free_buffers(dev); 1175 fec_enet_free_buffers(ndev);
1038 1176
1039 return 0; 1177 return 0;
1040} 1178}
@@ -1052,14 +1190,14 @@ fec_enet_close(struct net_device *dev)
1052#define HASH_BITS 6 /* #bits in hash */ 1190#define HASH_BITS 6 /* #bits in hash */
1053#define CRC32_POLY 0xEDB88320 1191#define CRC32_POLY 0xEDB88320
1054 1192
1055static void set_multicast_list(struct net_device *dev) 1193static void set_multicast_list(struct net_device *ndev)
1056{ 1194{
1057 struct fec_enet_private *fep = netdev_priv(dev); 1195 struct fec_enet_private *fep = netdev_priv(ndev);
1058 struct netdev_hw_addr *ha; 1196 struct netdev_hw_addr *ha;
1059 unsigned int i, bit, data, crc, tmp; 1197 unsigned int i, bit, data, crc, tmp;
1060 unsigned char hash; 1198 unsigned char hash;
1061 1199
1062 if (dev->flags & IFF_PROMISC) { 1200 if (ndev->flags & IFF_PROMISC) {
1063 tmp = readl(fep->hwp + FEC_R_CNTRL); 1201 tmp = readl(fep->hwp + FEC_R_CNTRL);
1064 tmp |= 0x8; 1202 tmp |= 0x8;
1065 writel(tmp, fep->hwp + FEC_R_CNTRL); 1203 writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1070,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev)
1070 tmp &= ~0x8; 1208 tmp &= ~0x8;
1071 writel(tmp, fep->hwp + FEC_R_CNTRL); 1209 writel(tmp, fep->hwp + FEC_R_CNTRL);
1072 1210
1073 if (dev->flags & IFF_ALLMULTI) { 1211 if (ndev->flags & IFF_ALLMULTI) {
1074 /* Catch all multicast addresses, so set the 1212 /* Catch all multicast addresses, so set the
1075 * filter to all 1's 1213 * filter to all 1's
1076 */ 1214 */
@@ -1085,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev)
1085 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 1223 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1086 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 1224 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1087 1225
1088 netdev_for_each_mc_addr(ha, dev) { 1226 netdev_for_each_mc_addr(ha, ndev) {
1089 /* Only support group multicast for now */ 1227 /* Only support group multicast for now */
1090 if (!(ha->addr[0] & 1)) 1228 if (!(ha->addr[0] & 1))
1091 continue; 1229 continue;
@@ -1093,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev)
1093 /* calculate crc32 value of mac address */ 1231 /* calculate crc32 value of mac address */
1094 crc = 0xffffffff; 1232 crc = 0xffffffff;
1095 1233
1096 for (i = 0; i < dev->addr_len; i++) { 1234 for (i = 0; i < ndev->addr_len; i++) {
1097 data = ha->addr[i]; 1235 data = ha->addr[i];
1098 for (bit = 0; bit < 8; bit++, data >>= 1) { 1236 for (bit = 0; bit < 8; bit++, data >>= 1) {
1099 crc = (crc >> 1) ^ 1237 crc = (crc >> 1) ^
@@ -1120,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev)
1120 1258
1121/* Set a MAC change in hardware. */ 1259/* Set a MAC change in hardware. */
1122static int 1260static int
1123fec_set_mac_address(struct net_device *dev, void *p) 1261fec_set_mac_address(struct net_device *ndev, void *p)
1124{ 1262{
1125 struct fec_enet_private *fep = netdev_priv(dev); 1263 struct fec_enet_private *fep = netdev_priv(ndev);
1126 struct sockaddr *addr = p; 1264 struct sockaddr *addr = p;
1127 1265
1128 if (!is_valid_ether_addr(addr->sa_data)) 1266 if (!is_valid_ether_addr(addr->sa_data))
1129 return -EADDRNOTAVAIL; 1267 return -EADDRNOTAVAIL;
1130 1268
1131 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1269 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1132 1270
1133 writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | 1271 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
1134 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), 1272 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
1135 fep->hwp + FEC_ADDR_LOW); 1273 fep->hwp + FEC_ADDR_LOW);
1136 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), 1274 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
1137 fep->hwp + FEC_ADDR_HIGH); 1275 fep->hwp + FEC_ADDR_HIGH);
1138 return 0; 1276 return 0;
1139} 1277}
@@ -1147,16 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = {
1147 .ndo_validate_addr = eth_validate_addr, 1285 .ndo_validate_addr = eth_validate_addr,
1148 .ndo_tx_timeout = fec_timeout, 1286 .ndo_tx_timeout = fec_timeout,
1149 .ndo_set_mac_address = fec_set_mac_address, 1287 .ndo_set_mac_address = fec_set_mac_address,
1150 .ndo_do_ioctl = fec_enet_ioctl, 1288 .ndo_do_ioctl = fec_enet_ioctl,
1151}; 1289};
1152 1290
1153 /* 1291 /*
1154 * XXX: We need to clean up on failure exits here. 1292 * XXX: We need to clean up on failure exits here.
1155 * 1293 *
1156 */ 1294 */
1157static int fec_enet_init(struct net_device *dev) 1295static int fec_enet_init(struct net_device *ndev)
1158{ 1296{
1159 struct fec_enet_private *fep = netdev_priv(dev); 1297 struct fec_enet_private *fep = netdev_priv(ndev);
1160 struct bufdesc *cbd_base; 1298 struct bufdesc *cbd_base;
1161 struct bufdesc *bdp; 1299 struct bufdesc *bdp;
1162 int i; 1300 int i;
@@ -1171,20 +1309,19 @@ static int fec_enet_init(struct net_device *dev)
1171 1309
1172 spin_lock_init(&fep->hw_lock); 1310 spin_lock_init(&fep->hw_lock);
1173 1311
1174 fep->hwp = (void __iomem *)dev->base_addr; 1312 fep->netdev = ndev;
1175 fep->netdev = dev;
1176 1313
1177 /* Get the Ethernet address */ 1314 /* Get the Ethernet address */
1178 fec_get_mac(dev); 1315 fec_get_mac(ndev);
1179 1316
1180 /* Set receive and transmit descriptor base. */ 1317 /* Set receive and transmit descriptor base. */
1181 fep->rx_bd_base = cbd_base; 1318 fep->rx_bd_base = cbd_base;
1182 fep->tx_bd_base = cbd_base + RX_RING_SIZE; 1319 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1183 1320
1184 /* The FEC Ethernet specific entries in the device structure */ 1321 /* The FEC Ethernet specific entries in the device structure */
1185 dev->watchdog_timeo = TX_TIMEOUT; 1322 ndev->watchdog_timeo = TX_TIMEOUT;
1186 dev->netdev_ops = &fec_netdev_ops; 1323 ndev->netdev_ops = &fec_netdev_ops;
1187 dev->ethtool_ops = &fec_enet_ethtool_ops; 1324 ndev->ethtool_ops = &fec_enet_ethtool_ops;
1188 1325
1189 /* Initialize the receive buffer descriptors. */ 1326 /* Initialize the receive buffer descriptors. */
1190 bdp = fep->rx_bd_base; 1327 bdp = fep->rx_bd_base;
@@ -1213,152 +1350,11 @@ static int fec_enet_init(struct net_device *dev)
1213 bdp--; 1350 bdp--;
1214 bdp->cbd_sc |= BD_SC_WRAP; 1351 bdp->cbd_sc |= BD_SC_WRAP;
1215 1352
1216 fec_restart(dev, 0); 1353 fec_restart(ndev, 0);
1217 1354
1218 return 0; 1355 return 0;
1219} 1356}
1220 1357
1221/* This function is called to start or restart the FEC during a link
1222 * change. This only happens when switching between half and full
1223 * duplex.
1224 */
1225static void
1226fec_restart(struct net_device *dev, int duplex)
1227{
1228 struct fec_enet_private *fep = netdev_priv(dev);
1229 const struct platform_device_id *id_entry =
1230 platform_get_device_id(fep->pdev);
1231 int i;
1232 u32 val, temp_mac[2];
1233
1234 /* Whack a reset. We should wait for this. */
1235 writel(1, fep->hwp + FEC_ECNTRL);
1236 udelay(10);
1237
1238 /*
1239 * enet-mac reset will reset mac address registers too,
1240 * so need to reconfigure it.
1241 */
1242 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1243 memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
1244 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1245 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1246 }
1247
1248 /* Clear any outstanding interrupt. */
1249 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1250
1251 /* Reset all multicast. */
1252 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1253 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1254#ifndef CONFIG_M5272
1255 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1256 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1257#endif
1258
1259 /* Set maximum receive buffer size. */
1260 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
1261
1262 /* Set receive and transmit descriptor base. */
1263 writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
1264 writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
1265 fep->hwp + FEC_X_DES_START);
1266
1267 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
1268 fep->cur_rx = fep->rx_bd_base;
1269
1270 /* Reset SKB transmit buffers. */
1271 fep->skb_cur = fep->skb_dirty = 0;
1272 for (i = 0; i <= TX_RING_MOD_MASK; i++) {
1273 if (fep->tx_skbuff[i]) {
1274 dev_kfree_skb_any(fep->tx_skbuff[i]);
1275 fep->tx_skbuff[i] = NULL;
1276 }
1277 }
1278
1279 /* Enable MII mode */
1280 if (duplex) {
1281 /* MII enable / FD enable */
1282 writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
1283 writel(0x04, fep->hwp + FEC_X_CNTRL);
1284 } else {
1285 /* MII enable / No Rcv on Xmit */
1286 writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
1287 writel(0x0, fep->hwp + FEC_X_CNTRL);
1288 }
1289 fep->full_duplex = duplex;
1290
1291 /* Set MII speed */
1292 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1293
1294 /*
1295 * The phy interface and speed need to get configured
1296 * differently on enet-mac.
1297 */
1298 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1299 val = readl(fep->hwp + FEC_R_CNTRL);
1300
1301 /* MII or RMII */
1302 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1303 val |= (1 << 8);
1304 else
1305 val &= ~(1 << 8);
1306
1307 /* 10M or 100M */
1308 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
1309 val &= ~(1 << 9);
1310 else
1311 val |= (1 << 9);
1312
1313 writel(val, fep->hwp + FEC_R_CNTRL);
1314 } else {
1315#ifdef FEC_MIIGSK_ENR
1316 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
1317 /* disable the gasket and wait */
1318 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1319 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1320 udelay(1);
1321
1322 /*
1323 * configure the gasket:
1324 * RMII, 50 MHz, no loopback, no echo
1325 */
1326 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
1327
1328 /* re-enable the gasket */
1329 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1330 }
1331#endif
1332 }
1333
1334 /* And last, enable the transmit and receive processing */
1335 writel(2, fep->hwp + FEC_ECNTRL);
1336 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1337
1338 /* Enable interrupts we wish to service */
1339 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1340}
1341
1342static void
1343fec_stop(struct net_device *dev)
1344{
1345 struct fec_enet_private *fep = netdev_priv(dev);
1346
1347 /* We cannot expect a graceful transmit stop without link !!! */
1348 if (fep->link) {
1349 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1350 udelay(10);
1351 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1352 printk("fec_stop : Graceful transmit stop did not complete !\n");
1353 }
1354
1355 /* Whack a reset. We should wait for this. */
1356 writel(1, fep->hwp + FEC_ECNTRL);
1357 udelay(10);
1358 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1359 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1360}
1361
1362static int __devinit 1358static int __devinit
1363fec_probe(struct platform_device *pdev) 1359fec_probe(struct platform_device *pdev)
1364{ 1360{
@@ -1378,19 +1374,20 @@ fec_probe(struct platform_device *pdev)
1378 1374
1379 /* Init network device */ 1375 /* Init network device */
1380 ndev = alloc_etherdev(sizeof(struct fec_enet_private)); 1376 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1381 if (!ndev) 1377 if (!ndev) {
1382 return -ENOMEM; 1378 ret = -ENOMEM;
1379 goto failed_alloc_etherdev;
1380 }
1383 1381
1384 SET_NETDEV_DEV(ndev, &pdev->dev); 1382 SET_NETDEV_DEV(ndev, &pdev->dev);
1385 1383
1386 /* setup board info structure */ 1384 /* setup board info structure */
1387 fep = netdev_priv(ndev); 1385 fep = netdev_priv(ndev);
1388 memset(fep, 0, sizeof(*fep));
1389 1386
1390 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); 1387 fep->hwp = ioremap(r->start, resource_size(r));
1391 fep->pdev = pdev; 1388 fep->pdev = pdev;
1392 1389
1393 if (!ndev->base_addr) { 1390 if (!fep->hwp) {
1394 ret = -ENOMEM; 1391 ret = -ENOMEM;
1395 goto failed_ioremap; 1392 goto failed_ioremap;
1396 } 1393 }
@@ -1408,10 +1405,9 @@ fec_probe(struct platform_device *pdev)
1408 break; 1405 break;
1409 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); 1406 ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
1410 if (ret) { 1407 if (ret) {
1411 while (i >= 0) { 1408 while (--i >= 0) {
1412 irq = platform_get_irq(pdev, i); 1409 irq = platform_get_irq(pdev, i);
1413 free_irq(irq, ndev); 1410 free_irq(irq, ndev);
1414 i--;
1415 } 1411 }
1416 goto failed_irq; 1412 goto failed_irq;
1417 } 1413 }
@@ -1454,9 +1450,11 @@ failed_clk:
1454 free_irq(irq, ndev); 1450 free_irq(irq, ndev);
1455 } 1451 }
1456failed_irq: 1452failed_irq:
1457 iounmap((void __iomem *)ndev->base_addr); 1453 iounmap(fep->hwp);
1458failed_ioremap: 1454failed_ioremap:
1459 free_netdev(ndev); 1455 free_netdev(ndev);
1456failed_alloc_etherdev:
1457 release_mem_region(r->start, resource_size(r));
1460 1458
1461 return ret; 1459 return ret;
1462} 1460}
@@ -1466,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev)
1466{ 1464{
1467 struct net_device *ndev = platform_get_drvdata(pdev); 1465 struct net_device *ndev = platform_get_drvdata(pdev);
1468 struct fec_enet_private *fep = netdev_priv(ndev); 1466 struct fec_enet_private *fep = netdev_priv(ndev);
1469 1467 struct resource *r;
1470 platform_set_drvdata(pdev, NULL);
1471 1468
1472 fec_stop(ndev); 1469 fec_stop(ndev);
1473 fec_enet_mii_remove(fep); 1470 fec_enet_mii_remove(fep);
1474 clk_disable(fep->clk); 1471 clk_disable(fep->clk);
1475 clk_put(fep->clk); 1472 clk_put(fep->clk);
1476 iounmap((void __iomem *)ndev->base_addr); 1473 iounmap(fep->hwp);
1477 unregister_netdev(ndev); 1474 unregister_netdev(ndev);
1478 free_netdev(ndev); 1475 free_netdev(ndev);
1476
1477 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1478 BUG_ON(!r);
1479 release_mem_region(r->start, resource_size(r));
1480
1481 platform_set_drvdata(pdev, NULL);
1482
1479 return 0; 1483 return 0;
1480} 1484}
1481 1485
@@ -1484,16 +1488,14 @@ static int
1484fec_suspend(struct device *dev) 1488fec_suspend(struct device *dev)
1485{ 1489{
1486 struct net_device *ndev = dev_get_drvdata(dev); 1490 struct net_device *ndev = dev_get_drvdata(dev);
1487 struct fec_enet_private *fep; 1491 struct fec_enet_private *fep = netdev_priv(ndev);
1488 1492
1489 if (ndev) { 1493 if (netif_running(ndev)) {
1490 fep = netdev_priv(ndev); 1494 fec_stop(ndev);
1491 if (netif_running(ndev)) { 1495 netif_device_detach(ndev);
1492 fec_stop(ndev);
1493 netif_device_detach(ndev);
1494 }
1495 clk_disable(fep->clk);
1496 } 1496 }
1497 clk_disable(fep->clk);
1498
1497 return 0; 1499 return 0;
1498} 1500}
1499 1501
@@ -1501,16 +1503,14 @@ static int
1501fec_resume(struct device *dev) 1503fec_resume(struct device *dev)
1502{ 1504{
1503 struct net_device *ndev = dev_get_drvdata(dev); 1505 struct net_device *ndev = dev_get_drvdata(dev);
1504 struct fec_enet_private *fep; 1506 struct fec_enet_private *fep = netdev_priv(ndev);
1505 1507
1506 if (ndev) { 1508 clk_enable(fep->clk);
1507 fep = netdev_priv(ndev); 1509 if (netif_running(ndev)) {
1508 clk_enable(fep->clk); 1510 fec_restart(ndev, fep->full_duplex);
1509 if (netif_running(ndev)) { 1511 netif_device_attach(ndev);
1510 fec_restart(ndev, fep->full_duplex);
1511 netif_device_attach(ndev);
1512 }
1513 } 1512 }
1513
1514 return 0; 1514 return 0;
1515} 1515}
1516 1516
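
The fec_probe()/fec_drv_remove() hunks above replace the old ndev->base_addr bookkeeping with fep->hwp, make every failure label undo exactly one earlier step, and add the previously missing release_mem_region() on both the error path and the remove path. Reduced to a skeleton, the unwind ordering they establish looks like the sketch below; it is illustrative only (example_probe, example_priv and the elided steps are not part of the patch):

#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

struct example_priv {
	void __iomem *regs;		/* what fec now keeps in fep->hwp */
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct net_device *ndev;
	struct resource *r;
	int ret;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	if (!request_mem_region(r->start, resource_size(r), pdev->name))
		return -EBUSY;

	ndev = alloc_etherdev(sizeof(struct example_priv));
	if (!ndev) {
		ret = -ENOMEM;
		goto failed_alloc_etherdev;
	}

	priv = netdev_priv(ndev);
	priv->regs = ioremap(r->start, resource_size(r));
	if (!priv->regs) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	/* ... clocks, IRQs and register_netdev() would follow here ... */
	platform_set_drvdata(pdev, ndev);
	return 0;

failed_ioremap:
	free_netdev(ndev);
failed_alloc_etherdev:
	release_mem_region(r->start, resource_size(r));
	return ret;
}

The remove path then releases the same resources on the way out (iounmap(), free_netdev(), release_mem_region()), which is what the fec_drv_remove() hunk above now does.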
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
new file mode 100644
index 00000000000..df70368bf31
--- /dev/null
+++ b/drivers/net/ftmac100.c
@@ -0,0 +1,1196 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24#include <linux/dma-mapping.h>
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/init.h>
28#include <linux/io.h>
29#include <linux/mii.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33
34#include "ftmac100.h"
35
36#define DRV_NAME "ftmac100"
37#define DRV_VERSION "0.2"
38
39#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
40#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
41
42#define MAX_PKT_SIZE 1518
43#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
44
45#if MAX_PKT_SIZE > 0x7ff
46#error invalid MAX_PKT_SIZE
47#endif
48
49#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE
50#error invalid RX_BUF_SIZE
51#endif
52
53/******************************************************************************
54 * private data
55 *****************************************************************************/
56struct ftmac100_descs {
57 struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
58 struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES];
59};
60
61struct ftmac100 {
62 struct resource *res;
63 void __iomem *base;
64 int irq;
65
66 struct ftmac100_descs *descs;
67 dma_addr_t descs_dma_addr;
68
69 unsigned int rx_pointer;
70 unsigned int tx_clean_pointer;
71 unsigned int tx_pointer;
72 unsigned int tx_pending;
73
74 spinlock_t tx_lock;
75
76 struct net_device *netdev;
77 struct device *dev;
78 struct napi_struct napi;
79
80 struct mii_if_info mii;
81};
82
83static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes);
84
85/******************************************************************************
86 * internal functions (hardware register access)
87 *****************************************************************************/
88#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \
89 FTMAC100_INT_NORXBUF | \
90 FTMAC100_INT_XPKT_OK | \
91 FTMAC100_INT_XPKT_LOST | \
92 FTMAC100_INT_RPKT_LOST | \
93 FTMAC100_INT_AHB_ERR | \
94 FTMAC100_INT_PHYSTS_CHG)
95
96#define INT_MASK_ALL_DISABLED 0
97
98static void ftmac100_enable_all_int(struct ftmac100 *priv)
99{
100 iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR);
101}
102
103static void ftmac100_disable_all_int(struct ftmac100 *priv)
104{
105 iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR);
106}
107
108static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
109{
110 iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR);
111}
112
113static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr)
114{
115 iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR);
116}
117
118static void ftmac100_txdma_start_polling(struct ftmac100 *priv)
119{
120 iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD);
121}
122
123static int ftmac100_reset(struct ftmac100 *priv)
124{
125 struct net_device *netdev = priv->netdev;
126 int i;
127
128 /* NOTE: reset clears all registers */
129 iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR);
130
131 for (i = 0; i < 5; i++) {
132 unsigned int maccr;
133
134 maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
135 if (!(maccr & FTMAC100_MACCR_SW_RST)) {
136 /*
137 * FTMAC100_MACCR_SW_RST cleared does not indicate
138 * that hardware reset completed (what the f*ck).
139 * We still need to wait for a while.
140 */
141 usleep_range(500, 1000);
142 return 0;
143 }
144
145 usleep_range(1000, 10000);
146 }
147
148 netdev_err(netdev, "software reset failed\n");
149 return -EIO;
150}
151
152static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
153{
154 unsigned int maddr = mac[0] << 8 | mac[1];
155 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
156
157 iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR);
158 iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR);
159}
160
161#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \
162 FTMAC100_MACCR_RCV_EN | \
163 FTMAC100_MACCR_XDMA_EN | \
164 FTMAC100_MACCR_RDMA_EN | \
165 FTMAC100_MACCR_CRC_APD | \
166 FTMAC100_MACCR_FULLDUP | \
167 FTMAC100_MACCR_RX_RUNT | \
168 FTMAC100_MACCR_RX_BROADPKT)
169
170static int ftmac100_start_hw(struct ftmac100 *priv)
171{
172 struct net_device *netdev = priv->netdev;
173
174 if (ftmac100_reset(priv))
175 return -EIO;
176
177 /* setup ring buffer base registers */
178 ftmac100_set_rx_ring_base(priv,
179 priv->descs_dma_addr +
180 offsetof(struct ftmac100_descs, rxdes));
181 ftmac100_set_tx_ring_base(priv,
182 priv->descs_dma_addr +
183 offsetof(struct ftmac100_descs, txdes));
184
185 iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC);
186
187 ftmac100_set_mac(priv, netdev->dev_addr);
188
189 iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
190 return 0;
191}
192
193static void ftmac100_stop_hw(struct ftmac100 *priv)
194{
195 iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR);
196}
197
198/******************************************************************************
199 * internal functions (receive descriptor)
200 *****************************************************************************/
201static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes)
202{
203 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS);
204}
205
206static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes)
207{
208 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS);
209}
210
211static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes)
212{
213 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
214}
215
216static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes)
217{
218 /* clear status bits */
219 rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN);
220}
221
222static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes)
223{
224 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR);
225}
226
227static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
228{
229 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
230}
231
232static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
233{
234 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
235}
236
237static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
238{
239 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
240}
241
242static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes)
243{
244 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB);
245}
246
247static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes)
248{
249 return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL;
250}
251
252static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes)
253{
254 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST);
255}
256
257static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes,
258 unsigned int size)
259{
260 rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR);
261 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size));
262}
263
264static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes)
265{
266 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR);
267}
268
269static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes,
270 dma_addr_t addr)
271{
272 rxdes->rxdes2 = cpu_to_le32(addr);
273}
274
275static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes)
276{
277 return le32_to_cpu(rxdes->rxdes2);
278}
279
280/*
281 * rxdes3 is not used by hardware. We use it to keep track of page.
282 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
283 */
284static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page)
285{
286 rxdes->rxdes3 = (unsigned int)page;
287}
288
289static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes)
290{
291 return (struct page *)rxdes->rxdes3;
292}
293
294/******************************************************************************
295 * internal functions (receive)
296 *****************************************************************************/
297static int ftmac100_next_rx_pointer(int pointer)
298{
299 return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
300}
301
302static void ftmac100_rx_pointer_advance(struct ftmac100 *priv)
303{
304 priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer);
305}
306
307static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv)
308{
309 return &priv->descs->rxdes[priv->rx_pointer];
310}
311
312static struct ftmac100_rxdes *
313ftmac100_rx_locate_first_segment(struct ftmac100 *priv)
314{
315 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
316
317 while (!ftmac100_rxdes_owned_by_dma(rxdes)) {
318 if (ftmac100_rxdes_first_segment(rxdes))
319 return rxdes;
320
321 ftmac100_rxdes_set_dma_own(rxdes);
322 ftmac100_rx_pointer_advance(priv);
323 rxdes = ftmac100_current_rxdes(priv);
324 }
325
326 return NULL;
327}
328
329static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
330 struct ftmac100_rxdes *rxdes)
331{
332 struct net_device *netdev = priv->netdev;
333 bool error = false;
334
335 if (unlikely(ftmac100_rxdes_rx_error(rxdes))) {
336 if (net_ratelimit())
337 netdev_info(netdev, "rx err\n");
338
339 netdev->stats.rx_errors++;
340 error = true;
341 }
342
343 if (unlikely(ftmac100_rxdes_crc_error(rxdes))) {
344 if (net_ratelimit())
345 netdev_info(netdev, "rx crc err\n");
346
347 netdev->stats.rx_crc_errors++;
348 error = true;
349 }
350
351 if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
352 if (net_ratelimit())
353 netdev_info(netdev, "rx frame too long\n");
354
355 netdev->stats.rx_length_errors++;
356 error = true;
357 } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
358 if (net_ratelimit())
359 netdev_info(netdev, "rx runt\n");
360
361 netdev->stats.rx_length_errors++;
362 error = true;
363 } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) {
364 if (net_ratelimit())
365 netdev_info(netdev, "rx odd nibble\n");
366
367 netdev->stats.rx_length_errors++;
368 error = true;
369 }
370
371 return error;
372}
373
374static void ftmac100_rx_drop_packet(struct ftmac100 *priv)
375{
376 struct net_device *netdev = priv->netdev;
377 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv);
378 bool done = false;
379
380 if (net_ratelimit())
381 netdev_dbg(netdev, "drop packet %p\n", rxdes);
382
383 do {
384 if (ftmac100_rxdes_last_segment(rxdes))
385 done = true;
386
387 ftmac100_rxdes_set_dma_own(rxdes);
388 ftmac100_rx_pointer_advance(priv);
389 rxdes = ftmac100_current_rxdes(priv);
390 } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes));
391
392 netdev->stats.rx_dropped++;
393}
394
395static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
396{
397 struct net_device *netdev = priv->netdev;
398 struct ftmac100_rxdes *rxdes;
399 struct sk_buff *skb;
400 struct page *page;
401 dma_addr_t map;
402 int length;
403
404 rxdes = ftmac100_rx_locate_first_segment(priv);
405 if (!rxdes)
406 return false;
407
408 if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) {
409 ftmac100_rx_drop_packet(priv);
410 return true;
411 }
412
413 /*
414 * It is impossible to get multi-segment packets
415 * because we always provide big enough receive buffers.
416 */
417 if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
418 BUG();
419
420 /* start processing */
421 skb = netdev_alloc_skb_ip_align(netdev, 128);
422 if (unlikely(!skb)) {
423 if (net_ratelimit())
424 netdev_err(netdev, "rx skb alloc failed\n");
425
426 ftmac100_rx_drop_packet(priv);
427 return true;
428 }
429
430 if (unlikely(ftmac100_rxdes_multicast(rxdes)))
431 netdev->stats.multicast++;
432
433 map = ftmac100_rxdes_get_dma_addr(rxdes);
434 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
435
436 length = ftmac100_rxdes_frame_length(rxdes);
437 page = ftmac100_rxdes_get_page(rxdes);
438 skb_fill_page_desc(skb, 0, page, 0, length);
439 skb->len += length;
440 skb->data_len += length;
441 skb->truesize += length;
442 __pskb_pull_tail(skb, min(length, 64));
443
444 ftmac100_alloc_rx_page(priv, rxdes);
445
446 ftmac100_rx_pointer_advance(priv);
447
448 skb->protocol = eth_type_trans(skb, netdev);
449
450 netdev->stats.rx_packets++;
451 netdev->stats.rx_bytes += skb->len;
452
453 /* push packet to protocol stack */
454 netif_receive_skb(skb);
455
456 (*processed)++;
457 return true;
458}
459
460/******************************************************************************
461 * internal functions (transmit descriptor)
462 *****************************************************************************/
463static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes)
464{
465 /* clear all except end of ring bit */
466 txdes->txdes0 = 0;
467 txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
468 txdes->txdes2 = 0;
469 txdes->txdes3 = 0;
470}
471
472static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes)
473{
474 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
475}
476
477static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes)
478{
479 /*
480 * Make sure dma own bit will not be set before any other
481 * descriptor fields.
482 */
483 wmb();
484 txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN);
485}
486
487static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes)
488{
489 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL);
490}
491
492static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes)
493{
494 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL);
495}
496
497static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes)
498{
499 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR);
500}
501
502static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes)
503{
504 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS);
505}
506
507static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes)
508{
509 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS);
510}
511
512static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes)
513{
514 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC);
515}
516
517static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes,
518 unsigned int len)
519{
520 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len));
521}
522
523static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes,
524 dma_addr_t addr)
525{
526 txdes->txdes2 = cpu_to_le32(addr);
527}
528
529static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes)
530{
531 return le32_to_cpu(txdes->txdes2);
532}
533
534/*
535 * txdes3 is not used by hardware. We use it to keep track of socket buffer.
536 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
537 */
538static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb)
539{
540 txdes->txdes3 = (unsigned int)skb;
541}
542
543static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes)
544{
545 return (struct sk_buff *)txdes->txdes3;
546}
547
548/******************************************************************************
549 * internal functions (transmit)
550 *****************************************************************************/
551static int ftmac100_next_tx_pointer(int pointer)
552{
553 return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
554}
555
556static void ftmac100_tx_pointer_advance(struct ftmac100 *priv)
557{
558 priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer);
559}
560
561static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv)
562{
563 priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer);
564}
565
566static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv)
567{
568 return &priv->descs->txdes[priv->tx_pointer];
569}
570
571static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv)
572{
573 return &priv->descs->txdes[priv->tx_clean_pointer];
574}
575
576static bool ftmac100_tx_complete_packet(struct ftmac100 *priv)
577{
578 struct net_device *netdev = priv->netdev;
579 struct ftmac100_txdes *txdes;
580 struct sk_buff *skb;
581 dma_addr_t map;
582
583 if (priv->tx_pending == 0)
584 return false;
585
586 txdes = ftmac100_current_clean_txdes(priv);
587
588 if (ftmac100_txdes_owned_by_dma(txdes))
589 return false;
590
591 skb = ftmac100_txdes_get_skb(txdes);
592 map = ftmac100_txdes_get_dma_addr(txdes);
593
594 if (unlikely(ftmac100_txdes_excessive_collision(txdes) ||
595 ftmac100_txdes_late_collision(txdes))) {
596 /*
597 * packet transmitted to ethernet lost due to late collision
598 * or excessive collision
599 */
600 netdev->stats.tx_aborted_errors++;
601 } else {
602 netdev->stats.tx_packets++;
603 netdev->stats.tx_bytes += skb->len;
604 }
605
606 dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
607 dev_kfree_skb(skb);
608
609 ftmac100_txdes_reset(txdes);
610
611 ftmac100_tx_clean_pointer_advance(priv);
612
613 spin_lock(&priv->tx_lock);
614 priv->tx_pending--;
615 spin_unlock(&priv->tx_lock);
616 netif_wake_queue(netdev);
617
618 return true;
619}
620
621static void ftmac100_tx_complete(struct ftmac100 *priv)
622{
623 while (ftmac100_tx_complete_packet(priv))
624 ;
625}
626
627static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb,
628 dma_addr_t map)
629{
630 struct net_device *netdev = priv->netdev;
631 struct ftmac100_txdes *txdes;
632 unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
633
634 txdes = ftmac100_current_txdes(priv);
635 ftmac100_tx_pointer_advance(priv);
636
637 /* setup TX descriptor */
638 ftmac100_txdes_set_skb(txdes, skb);
639 ftmac100_txdes_set_dma_addr(txdes, map);
640
641 ftmac100_txdes_set_first_segment(txdes);
642 ftmac100_txdes_set_last_segment(txdes);
643 ftmac100_txdes_set_txint(txdes);
644 ftmac100_txdes_set_buffer_size(txdes, len);
645
646 spin_lock(&priv->tx_lock);
647 priv->tx_pending++;
648 if (priv->tx_pending == TX_QUEUE_ENTRIES)
649 netif_stop_queue(netdev);
650
651 /* start transmit */
652 ftmac100_txdes_set_dma_own(txdes);
653 spin_unlock(&priv->tx_lock);
654
655 ftmac100_txdma_start_polling(priv);
656 return NETDEV_TX_OK;
657}
658
659/******************************************************************************
660 * internal functions (buffer)
661 *****************************************************************************/
662static int ftmac100_alloc_rx_page(struct ftmac100 *priv, struct ftmac100_rxdes *rxdes)
663{
664 struct net_device *netdev = priv->netdev;
665 struct page *page;
666 dma_addr_t map;
667
668 page = alloc_page(GFP_KERNEL);
669 if (!page) {
670 if (net_ratelimit())
671 netdev_err(netdev, "failed to allocate rx page\n");
672 return -ENOMEM;
673 }
674
675 map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
676 if (unlikely(dma_mapping_error(priv->dev, map))) {
677 if (net_ratelimit())
678 netdev_err(netdev, "failed to map rx page\n");
679 __free_page(page);
680 return -ENOMEM;
681 }
682
683 ftmac100_rxdes_set_page(rxdes, page);
684 ftmac100_rxdes_set_dma_addr(rxdes, map);
685 ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE);
686 ftmac100_rxdes_set_dma_own(rxdes);
687 return 0;
688}
689
690static void ftmac100_free_buffers(struct ftmac100 *priv)
691{
692 int i;
693
694 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
695 struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
696 struct page *page = ftmac100_rxdes_get_page(rxdes);
697 dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes);
698
699 if (!page)
700 continue;
701
702 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
703 __free_page(page);
704 }
705
706 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
707 struct ftmac100_txdes *txdes = &priv->descs->txdes[i];
708 struct sk_buff *skb = ftmac100_txdes_get_skb(txdes);
709 dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes);
710
711 if (!skb)
712 continue;
713
714 dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
715 dev_kfree_skb(skb);
716 }
717
718 dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs),
719 priv->descs, priv->descs_dma_addr);
720}
721
722static int ftmac100_alloc_buffers(struct ftmac100 *priv)
723{
724 int i;
725
726 priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
727 &priv->descs_dma_addr, GFP_KERNEL);
728 if (!priv->descs)
729 return -ENOMEM;
730
731 memset(priv->descs, 0, sizeof(struct ftmac100_descs));
732
733 /* initialize RX ring */
734 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
735
736 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
737 struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i];
738
739 if (ftmac100_alloc_rx_page(priv, rxdes))
740 goto err;
741 }
742
743 /* initialize TX ring */
744 ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
745 return 0;
746
747err:
748 ftmac100_free_buffers(priv);
749 return -ENOMEM;
750}
751
752/******************************************************************************
753 * struct mii_if_info functions
754 *****************************************************************************/
755static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
756{
757 struct ftmac100 *priv = netdev_priv(netdev);
758 unsigned int phycr;
759 int i;
760
761 phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
762 FTMAC100_PHYCR_REGAD(reg) |
763 FTMAC100_PHYCR_MIIRD;
764
765 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
766
767 for (i = 0; i < 10; i++) {
768 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
769
770 if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
771 return phycr & FTMAC100_PHYCR_MIIRDATA;
772
773 usleep_range(100, 1000);
774 }
775
776 netdev_err(netdev, "mdio read timed out\n");
777 return 0;
778}
779
780static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
781 int data)
782{
783 struct ftmac100 *priv = netdev_priv(netdev);
784 unsigned int phycr;
785 int i;
786
787 phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
788 FTMAC100_PHYCR_REGAD(reg) |
789 FTMAC100_PHYCR_MIIWR;
790
791 data = FTMAC100_PHYWDATA_MIIWDATA(data);
792
793 iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA);
794 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR);
795
796 for (i = 0; i < 10; i++) {
797 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR);
798
799 if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
800 return;
801
802 usleep_range(100, 1000);
803 }
804
805 netdev_err(netdev, "mdio write timed out\n");
806}
807
808/******************************************************************************
809 * struct ethtool_ops functions
810 *****************************************************************************/
811static void ftmac100_get_drvinfo(struct net_device *netdev,
812 struct ethtool_drvinfo *info)
813{
814 strcpy(info->driver, DRV_NAME);
815 strcpy(info->version, DRV_VERSION);
816 strcpy(info->bus_info, dev_name(&netdev->dev));
817}
818
819static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
820{
821 struct ftmac100 *priv = netdev_priv(netdev);
822 return mii_ethtool_gset(&priv->mii, cmd);
823}
824
825static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
826{
827 struct ftmac100 *priv = netdev_priv(netdev);
828 return mii_ethtool_sset(&priv->mii, cmd);
829}
830
831static int ftmac100_nway_reset(struct net_device *netdev)
832{
833 struct ftmac100 *priv = netdev_priv(netdev);
834 return mii_nway_restart(&priv->mii);
835}
836
837static u32 ftmac100_get_link(struct net_device *netdev)
838{
839 struct ftmac100 *priv = netdev_priv(netdev);
840 return mii_link_ok(&priv->mii);
841}
842
843static const struct ethtool_ops ftmac100_ethtool_ops = {
844 .set_settings = ftmac100_set_settings,
845 .get_settings = ftmac100_get_settings,
846 .get_drvinfo = ftmac100_get_drvinfo,
847 .nway_reset = ftmac100_nway_reset,
848 .get_link = ftmac100_get_link,
849};
850
851/******************************************************************************
852 * interrupt handler
853 *****************************************************************************/
854static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
855{
856 struct net_device *netdev = dev_id;
857 struct ftmac100 *priv = netdev_priv(netdev);
858
859 if (likely(netif_running(netdev))) {
860 /* Disable interrupts for polling */
861 ftmac100_disable_all_int(priv);
862 napi_schedule(&priv->napi);
863 }
864
865 return IRQ_HANDLED;
866}
867
868/******************************************************************************
869 * struct napi_struct functions
870 *****************************************************************************/
871static int ftmac100_poll(struct napi_struct *napi, int budget)
872{
873 struct ftmac100 *priv = container_of(napi, struct ftmac100, napi);
874 struct net_device *netdev = priv->netdev;
875 unsigned int status;
876 bool completed = true;
877 int rx = 0;
878
879 status = ioread32(priv->base + FTMAC100_OFFSET_ISR);
880
881 if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) {
882 /*
883 * FTMAC100_INT_RPKT_FINISH:
884 * RX DMA has received packets into RX buffer successfully
885 *
886 * FTMAC100_INT_NORXBUF:
887 * RX buffer unavailable
888 */
889 bool retry;
890
891 do {
892 retry = ftmac100_rx_packet(priv, &rx);
893 } while (retry && rx < budget);
894
895 if (retry && rx == budget)
896 completed = false;
897 }
898
899 if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) {
900 /*
901 * FTMAC100_INT_XPKT_OK:
902 * packet transmitted to ethernet successfully
903 *
904 * FTMAC100_INT_XPKT_LOST:
905 * packet transmitted to ethernet lost due to late
906 * collision or excessive collision
907 */
908 ftmac100_tx_complete(priv);
909 }
910
911 if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST |
912 FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) {
913 if (net_ratelimit())
914 netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
915 status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "",
916 status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
917 status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
918 status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
919
920 if (status & FTMAC100_INT_NORXBUF) {
921 /* RX buffer unavailable */
922 netdev->stats.rx_over_errors++;
923 }
924
925 if (status & FTMAC100_INT_RPKT_LOST) {
926 /* received packet lost due to RX FIFO full */
927 netdev->stats.rx_fifo_errors++;
928 }
929
930 if (status & FTMAC100_INT_PHYSTS_CHG) {
931 /* PHY link status change */
932 mii_check_link(&priv->mii);
933 }
934 }
935
936 if (completed) {
937 /* stop polling */
938 napi_complete(napi);
939 ftmac100_enable_all_int(priv);
940 }
941
942 return rx;
943}
944
945/******************************************************************************
946 * struct net_device_ops functions
947 *****************************************************************************/
948static int ftmac100_open(struct net_device *netdev)
949{
950 struct ftmac100 *priv = netdev_priv(netdev);
951 int err;
952
953 err = ftmac100_alloc_buffers(priv);
954 if (err) {
955 netdev_err(netdev, "failed to allocate buffers\n");
956 goto err_alloc;
957 }
958
959 err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
960 if (err) {
961 netdev_err(netdev, "failed to request irq %d\n", priv->irq);
962 goto err_irq;
963 }
964
965 priv->rx_pointer = 0;
966 priv->tx_clean_pointer = 0;
967 priv->tx_pointer = 0;
968 priv->tx_pending = 0;
969
970 err = ftmac100_start_hw(priv);
971 if (err)
972 goto err_hw;
973
974 napi_enable(&priv->napi);
975 netif_start_queue(netdev);
976
977 ftmac100_enable_all_int(priv);
978
979 return 0;
980
981err_hw:
982 free_irq(priv->irq, netdev);
983err_irq:
984 ftmac100_free_buffers(priv);
985err_alloc:
986 return err;
987}
988
989static int ftmac100_stop(struct net_device *netdev)
990{
991 struct ftmac100 *priv = netdev_priv(netdev);
992
993 ftmac100_disable_all_int(priv);
994 netif_stop_queue(netdev);
995 napi_disable(&priv->napi);
996 ftmac100_stop_hw(priv);
997 free_irq(priv->irq, netdev);
998 ftmac100_free_buffers(priv);
999
1000 return 0;
1001}
1002
1003static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1004{
1005 struct ftmac100 *priv = netdev_priv(netdev);
1006 dma_addr_t map;
1007
1008 if (unlikely(skb->len > MAX_PKT_SIZE)) {
1009 if (net_ratelimit())
1010 netdev_dbg(netdev, "tx packet too big\n");
1011
1012 netdev->stats.tx_dropped++;
1013 dev_kfree_skb(skb);
1014 return NETDEV_TX_OK;
1015 }
1016
1017 map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1018 if (unlikely(dma_mapping_error(priv->dev, map))) {
1019 /* drop packet */
1020 if (net_ratelimit())
1021 netdev_err(netdev, "map socket buffer failed\n");
1022
1023 netdev->stats.tx_dropped++;
1024 dev_kfree_skb(skb);
1025 return NETDEV_TX_OK;
1026 }
1027
1028 return ftmac100_xmit(priv, skb, map);
1029}
1030
1031/* optional */
1032static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1033{
1034 struct ftmac100 *priv = netdev_priv(netdev);
1035 struct mii_ioctl_data *data = if_mii(ifr);
1036
1037 return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
1038}
1039
1040static const struct net_device_ops ftmac100_netdev_ops = {
1041 .ndo_open = ftmac100_open,
1042 .ndo_stop = ftmac100_stop,
1043 .ndo_start_xmit = ftmac100_hard_start_xmit,
1044 .ndo_set_mac_address = eth_mac_addr,
1045 .ndo_validate_addr = eth_validate_addr,
1046 .ndo_do_ioctl = ftmac100_do_ioctl,
1047};
1048
1049/******************************************************************************
1050 * struct platform_driver functions
1051 *****************************************************************************/
1052static int ftmac100_probe(struct platform_device *pdev)
1053{
1054 struct resource *res;
1055 int irq;
1056 struct net_device *netdev;
1057 struct ftmac100 *priv;
1058 int err;
1059
1060 if (!pdev)
1061 return -ENODEV;
1062
1063 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1064 if (!res)
1065 return -ENXIO;
1066
1067 irq = platform_get_irq(pdev, 0);
1068 if (irq < 0)
1069 return irq;
1070
1071 /* setup net_device */
1072 netdev = alloc_etherdev(sizeof(*priv));
1073 if (!netdev) {
1074 err = -ENOMEM;
1075 goto err_alloc_etherdev;
1076 }
1077
1078 SET_NETDEV_DEV(netdev, &pdev->dev);
1079 SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
1080 netdev->netdev_ops = &ftmac100_netdev_ops;
1081
1082 platform_set_drvdata(pdev, netdev);
1083
1084 /* setup private data */
1085 priv = netdev_priv(netdev);
1086 priv->netdev = netdev;
1087 priv->dev = &pdev->dev;
1088
1089 spin_lock_init(&priv->tx_lock);
1090
1091 /* initialize NAPI */
1092 netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
1093
1094 /* map io memory */
1095 priv->res = request_mem_region(res->start, resource_size(res),
1096 dev_name(&pdev->dev));
1097 if (!priv->res) {
1098 dev_err(&pdev->dev, "Could not reserve memory region\n");
1099 err = -ENOMEM;
1100 goto err_req_mem;
1101 }
1102
1103 priv->base = ioremap(res->start, res->end - res->start);
1104 if (!priv->base) {
1105 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1106 err = -EIO;
1107 goto err_ioremap;
1108 }
1109
1110 priv->irq = irq;
1111
1112 /* initialize struct mii_if_info */
1113 priv->mii.phy_id = 0;
1114 priv->mii.phy_id_mask = 0x1f;
1115 priv->mii.reg_num_mask = 0x1f;
1116 priv->mii.dev = netdev;
1117 priv->mii.mdio_read = ftmac100_mdio_read;
1118 priv->mii.mdio_write = ftmac100_mdio_write;
1119
1120 /* register network device */
1121 err = register_netdev(netdev);
1122 if (err) {
1123 dev_err(&pdev->dev, "Failed to register netdev\n");
1124 goto err_register_netdev;
1125 }
1126
1127 netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
1128
1129 if (!is_valid_ether_addr(netdev->dev_addr)) {
1130 random_ether_addr(netdev->dev_addr);
1131 netdev_info(netdev, "generated random MAC address %pM\n",
1132 netdev->dev_addr);
1133 }
1134
1135 return 0;
1136
1137err_register_netdev:
1138 iounmap(priv->base);
1139err_ioremap:
1140 release_resource(priv->res);
1141err_req_mem:
1142 netif_napi_del(&priv->napi);
1143 platform_set_drvdata(pdev, NULL);
1144 free_netdev(netdev);
1145err_alloc_etherdev:
1146 return err;
1147}
1148
1149static int __exit ftmac100_remove(struct platform_device *pdev)
1150{
1151 struct net_device *netdev;
1152 struct ftmac100 *priv;
1153
1154 netdev = platform_get_drvdata(pdev);
1155 priv = netdev_priv(netdev);
1156
1157 unregister_netdev(netdev);
1158
1159 iounmap(priv->base);
1160 release_resource(priv->res);
1161
1162 netif_napi_del(&priv->napi);
1163 platform_set_drvdata(pdev, NULL);
1164 free_netdev(netdev);
1165 return 0;
1166}
1167
1168static struct platform_driver ftmac100_driver = {
1169 .probe = ftmac100_probe,
1170 .remove = __exit_p(ftmac100_remove),
1171 .driver = {
1172 .name = DRV_NAME,
1173 .owner = THIS_MODULE,
1174 },
1175};
1176
1177/******************************************************************************
1178 * initialization / finalization
1179 *****************************************************************************/
1180static int __init ftmac100_init(void)
1181{
1182 pr_info("Loading version " DRV_VERSION " ...\n");
1183 return platform_driver_register(&ftmac100_driver);
1184}
1185
1186static void __exit ftmac100_exit(void)
1187{
1188 platform_driver_unregister(&ftmac100_driver);
1189}
1190
1191module_init(ftmac100_init);
1192module_exit(ftmac100_exit);
1193
1194MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
1195MODULE_DESCRIPTION("FTMAC100 driver");
1196MODULE_LICENSE("GPL");
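
Two small conventions above carry most of the RX/TX logic: ring indices advance with a power-of-two mask (RX_QUEUE_ENTRIES and TX_QUEUE_ENTRIES "must be power of 2"), and each descriptor is handed between driver and DMA engine through an "owned by DMA" bit in des0. The toy program below models just those two ideas in plain userspace C; it is not driver code, and in the real driver the descriptors live in DMA-coherent memory and the ownership bit is only set after a wmb(), as ftmac100_txdes_set_dma_own() notes.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_ENTRIES	16			/* must be a power of 2 */
#define DES0_DMA_OWN	(1u << 31)		/* stands in for FTMAC100_*DES0_*DMA_OWN */

struct toy_des {
	uint32_t des0;
};

/* same wrap trick as ftmac100_next_rx_pointer()/ftmac100_next_tx_pointer() */
static unsigned int next_pointer(unsigned int p)
{
	return (p + 1) & (QUEUE_ENTRIES - 1);
}

int main(void)
{
	struct toy_des ring[QUEUE_ENTRIES] = { { 0 } };
	unsigned int p = QUEUE_ENTRIES - 2;

	/* hand the last two slots to the (imaginary) DMA engine */
	ring[p].des0 |= DES0_DMA_OWN;
	ring[next_pointer(p)].des0 |= DES0_DMA_OWN;

	/* the "CPU" side only touches descriptors it owns, and wraps past the end */
	for (int i = 0; i < 4; i++, p = next_pointer(p)) {
		bool dma_owned = ring[p].des0 & DES0_DMA_OWN;

		printf("slot %2u: %s\n", p,
		       dma_owned ? "owned by DMA, skip" : "owned by CPU, process");
	}
	return 0;
}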
diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h
new file mode 100644
index 00000000000..46a0c47b1ee
--- /dev/null
+++ b/drivers/net/ftmac100.h
@@ -0,0 +1,180 @@
1/*
2 * Faraday FTMAC100 10/100 Ethernet
3 *
4 * (C) Copyright 2009-2011 Faraday Technology
5 * Po-Yu Chuang <ratbert@faraday-tech.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef __FTMAC100_H
23#define __FTMAC100_H
24
25#define FTMAC100_OFFSET_ISR 0x00
26#define FTMAC100_OFFSET_IMR 0x04
27#define FTMAC100_OFFSET_MAC_MADR 0x08
28#define FTMAC100_OFFSET_MAC_LADR 0x0c
29#define FTMAC100_OFFSET_MAHT0 0x10
30#define FTMAC100_OFFSET_MAHT1 0x14
31#define FTMAC100_OFFSET_TXPD 0x18
32#define FTMAC100_OFFSET_RXPD 0x1c
33#define FTMAC100_OFFSET_TXR_BADR 0x20
34#define FTMAC100_OFFSET_RXR_BADR 0x24
35#define FTMAC100_OFFSET_ITC 0x28
36#define FTMAC100_OFFSET_APTC 0x2c
37#define FTMAC100_OFFSET_DBLAC 0x30
38#define FTMAC100_OFFSET_MACCR 0x88
39#define FTMAC100_OFFSET_MACSR 0x8c
40#define FTMAC100_OFFSET_PHYCR 0x90
41#define FTMAC100_OFFSET_PHYWDATA 0x94
42#define FTMAC100_OFFSET_FCR 0x98
43#define FTMAC100_OFFSET_BPR 0x9c
44#define FTMAC100_OFFSET_TS 0xc4
45#define FTMAC100_OFFSET_DMAFIFOS 0xc8
46#define FTMAC100_OFFSET_TM 0xcc
47#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4
48#define FTMAC100_OFFSET_RPF_AEP 0xd8
49#define FTMAC100_OFFSET_XM_PG 0xdc
50#define FTMAC100_OFFSET_RUNT_TLCC 0xe0
51#define FTMAC100_OFFSET_CRCER_FTL 0xe4
52#define FTMAC100_OFFSET_RLC_RCC 0xe8
53#define FTMAC100_OFFSET_BROC 0xec
54#define FTMAC100_OFFSET_MULCA 0xf0
55#define FTMAC100_OFFSET_RP 0xf4
56#define FTMAC100_OFFSET_XP 0xf8
57
58/*
59 * Interrupt status register & interrupt mask register
60 */
61#define FTMAC100_INT_RPKT_FINISH (1 << 0)
62#define FTMAC100_INT_NORXBUF (1 << 1)
63#define FTMAC100_INT_XPKT_FINISH (1 << 2)
64#define FTMAC100_INT_NOTXBUF (1 << 3)
65#define FTMAC100_INT_XPKT_OK (1 << 4)
66#define FTMAC100_INT_XPKT_LOST (1 << 5)
67#define FTMAC100_INT_RPKT_SAV (1 << 6)
68#define FTMAC100_INT_RPKT_LOST (1 << 7)
69#define FTMAC100_INT_AHB_ERR (1 << 8)
70#define FTMAC100_INT_PHYSTS_CHG (1 << 9)
71
72/*
73 * Interrupt timer control register
74 */
75#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
76#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
77#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7)
78#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
79#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
80#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15)
81
82/*
83 * Automatic polling timer control register
84 */
85#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
86#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
87#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
88#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
89
90/*
91 * DMA burst length and arbitration control register
92 */
93#define FTMAC100_DBLAC_INCR4_EN (1 << 0)
94#define FTMAC100_DBLAC_INCR8_EN (1 << 1)
95#define FTMAC100_DBLAC_INCR16_EN (1 << 2)
96#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3)
97#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6)
98#define FTMAC100_DBLAC_RX_THR_EN (1 << 9)
99
100/*
101 * MAC control register
102 */
103#define FTMAC100_MACCR_XDMA_EN (1 << 0)
104#define FTMAC100_MACCR_RDMA_EN (1 << 1)
105#define FTMAC100_MACCR_SW_RST (1 << 2)
106#define FTMAC100_MACCR_LOOP_EN (1 << 3)
107#define FTMAC100_MACCR_CRC_DIS (1 << 4)
108#define FTMAC100_MACCR_XMT_EN (1 << 5)
109#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6)
110#define FTMAC100_MACCR_RCV_EN (1 << 8)
111#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9)
112#define FTMAC100_MACCR_RX_RUNT (1 << 10)
113#define FTMAC100_MACCR_RX_FTL (1 << 11)
114#define FTMAC100_MACCR_RCV_ALL (1 << 12)
115#define FTMAC100_MACCR_CRC_APD (1 << 14)
116#define FTMAC100_MACCR_FULLDUP (1 << 15)
117#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16)
118#define FTMAC100_MACCR_RX_BROADPKT (1 << 17)
119
120/*
121 * PHY control register
122 */
123#define FTMAC100_PHYCR_MIIRDATA 0xffff
124#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
125#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
126#define FTMAC100_PHYCR_MIIRD (1 << 26)
127#define FTMAC100_PHYCR_MIIWR (1 << 27)
128
129/*
130 * PHY write data register
131 */
132#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff)
133
134/*
135 * Transmit descriptor, aligned to 16 bytes
136 */
137struct ftmac100_txdes {
138 unsigned int txdes0;
139 unsigned int txdes1;
140 unsigned int txdes2; /* TXBUF_BADR */
141 unsigned int txdes3; /* not used by HW */
142} __attribute__ ((aligned(16)));
143
144#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0)
145#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1)
146#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31)
147
148#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff)
149#define FTMAC100_TXDES1_LTS (1 << 27)
150#define FTMAC100_TXDES1_FTS (1 << 28)
151#define FTMAC100_TXDES1_TX2FIC (1 << 29)
152#define FTMAC100_TXDES1_TXIC (1 << 30)
153#define FTMAC100_TXDES1_EDOTR (1 << 31)
154
155/*
156 * Receive descriptor, aligned to 16 bytes
157 */
158struct ftmac100_rxdes {
159 unsigned int rxdes0;
160 unsigned int rxdes1;
161 unsigned int rxdes2; /* RXBUF_BADR */
162 unsigned int rxdes3; /* not used by HW */
163} __attribute__ ((aligned(16)));
164
165#define FTMAC100_RXDES0_RFL 0x7ff
166#define FTMAC100_RXDES0_MULTICAST (1 << 16)
167#define FTMAC100_RXDES0_BROADCAST (1 << 17)
168#define FTMAC100_RXDES0_RX_ERR (1 << 18)
169#define FTMAC100_RXDES0_CRC_ERR (1 << 19)
170#define FTMAC100_RXDES0_FTL (1 << 20)
171#define FTMAC100_RXDES0_RUNT (1 << 21)
172#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22)
173#define FTMAC100_RXDES0_LRS (1 << 28)
174#define FTMAC100_RXDES0_FRS (1 << 29)
175#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31)
176
177#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff)
178#define FTMAC100_RXDES1_EDORR (1 << 31)
179
180#endif /* __FTMAC100_H */
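
As a usage illustration of the PHYCR bitfields defined above, the command word that ftmac100_mdio_read() writes before polling can be composed and inspected in plain C. The macros are copied from this header; the chosen phy_id and register number are just example inputs (the driver defaults priv->mii.phy_id to 0).

#include <stdio.h>

#define FTMAC100_PHYCR_MIIRDATA		0xffff
#define FTMAC100_PHYCR_PHYAD(x)		(((x) & 0x1f) << 16)
#define FTMAC100_PHYCR_REGAD(x)		(((x) & 0x1f) << 21)
#define FTMAC100_PHYCR_MIIRD		(1 << 26)

int main(void)
{
	unsigned int phy_id = 0;	/* example PHY address */
	unsigned int reg = 1;		/* MII_BMSR, as an example register */
	unsigned int phycr = FTMAC100_PHYCR_PHYAD(phy_id) |
			     FTMAC100_PHYCR_REGAD(reg) |
			     FTMAC100_PHYCR_MIIRD;

	/* the driver writes this to FTMAC100_OFFSET_PHYCR, then polls until
	 * MIIRD clears; the low 16 bits of PHYCR then hold the read data */
	printf("PHYCR command word: 0x%08x (read data mask 0x%04x)\n",
	       phycr, FTMAC100_PHYCR_MIIRDATA);
	return 0;
}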
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323c5eb..8931168d3e7 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct list_head *p;
+	struct bpqdev *bpqdev = v;
 
 	++*pos;
 
 	if (v == SEQ_START_TOKEN)
-		p = rcu_dereference(bpq_devices.next);
+		p = rcu_dereference(list_next_rcu(&bpq_devices));
 	else
-		p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+		p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
 
 	return (p == &bpq_devices) ? NULL
 		: list_entry(p, struct bpqdev, bpq_list);
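
For context on the hunk above: list_next_rcu() lets the ->next pointer be read through a __rcu-annotated lvalue that sparse can check, instead of the open-coded bpq_devices.next and bpq_list.next dereferences. At the time of this series the helper in include/linux/rculist.h is defined essentially as:

#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

so rcu_dereference(list_next_rcu(&bpq_devices)) reads the same pointer as before, only with the proper annotation.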
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368fa6bc..65c1833244f 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -129,6 +129,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		break;
 	case E1000_DEV_ID_82580_COPPER:
 	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
 	case E1000_DEV_ID_82580_SERDES:
 	case E1000_DEV_ID_82580_SGMII:
 	case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -237,9 +238,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		size = 14;
 	nvm->word_size = 1 << size;
 
-	/* if 82576 then initialize mailbox parameters */
-	if (mac->type == e1000_82576)
+	/* if part supports SR-IOV then initialize mailbox parameters */
+	switch (mac->type) {
+	case e1000_82576:
+	case e1000_i350:
 		igb_init_mbx_params_pf(hw);
+		break;
+	default:
+		break;
+	}
 
 	/* setup PHY parameters */
 	if (phy->media_type != e1000_media_type_copper) {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6319ed902bc..ff46c91520a 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -770,4 +770,11 @@
770#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based 770#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
771 on DMA coal */ 771 on DMA coal */
772 772
773/* Tx Rate-Scheduler Config fields */
774#define E1000_RTTBCNRC_RS_ENA 0x80000000
775#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
776#define E1000_RTTBCNRC_RF_INT_SHIFT 14
777#define E1000_RTTBCNRC_RF_INT_MASK \
778 (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
779
773#endif 780#endif
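
The new RTTBCNRC fields encode a per-queue cap as a fixed-point divisor of the link speed: effective rate = link_speed / (RF_INT + RF_DEC / 2^14). A standalone worked example (userspace C, not driver code) for a 300 Mb/s cap on a 1000 Mb/s link, matching the calculation igb_set_vf_rate_limit() performs further down in this diff:

#include <stdio.h>

#define E1000_RTTBCNRC_RS_ENA		0x80000000u
#define E1000_RTTBCNRC_RF_DEC_MASK	0x00003FFFu
#define E1000_RTTBCNRC_RF_INT_SHIFT	14
#define E1000_RTTBCNRC_RF_INT_MASK \
	(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)

int main(void)
{
	unsigned int link_speed = 1000, tx_rate = 300;		/* Mb/s */
	unsigned int rf_int = link_speed / tx_rate;		/* 3 */
	unsigned int rf_dec = link_speed - rf_int * tx_rate;	/* 100 */
	unsigned int bcnrc;

	rf_dec = (rf_dec * (1u << E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; /* 5461 */

	bcnrc  = E1000_RTTBCNRC_RS_ENA;
	bcnrc |= (rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) & E1000_RTTBCNRC_RF_INT_MASK;
	bcnrc |= rf_dec & E1000_RTTBCNRC_RF_DEC_MASK;

	printf("RTTBCNRC = 0x%08X\n", bcnrc);	/* prints 0x8000D555: 1000/3.333 = 300 */
	return 0;
}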
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638afb8cd..281324e8598 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@ struct e1000_hw;
54#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
57#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 58#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
58#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A 59#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
59#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C 60#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb7004..78d48c7fa85 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
422{ 422{
423 struct e1000_mbx_info *mbx = &hw->mbx; 423 struct e1000_mbx_info *mbx = &hw->mbx;
424 424
425 if (hw->mac.type == e1000_82576) { 425 mbx->timeout = 0;
426 mbx->timeout = 0; 426 mbx->usec_delay = 0;
427 mbx->usec_delay = 0; 427
428 428 mbx->size = E1000_VFMAILBOX_SIZE;
429 mbx->size = E1000_VFMAILBOX_SIZE; 429
430 430 mbx->ops.read = igb_read_mbx_pf;
431 mbx->ops.read = igb_read_mbx_pf; 431 mbx->ops.write = igb_write_mbx_pf;
432 mbx->ops.write = igb_write_mbx_pf; 432 mbx->ops.read_posted = igb_read_posted_mbx;
433 mbx->ops.read_posted = igb_read_posted_mbx; 433 mbx->ops.write_posted = igb_write_posted_mbx;
434 mbx->ops.write_posted = igb_write_posted_mbx; 434 mbx->ops.check_for_msg = igb_check_for_msg_pf;
435 mbx->ops.check_for_msg = igb_check_for_msg_pf; 435 mbx->ops.check_for_ack = igb_check_for_ack_pf;
436 mbx->ops.check_for_ack = igb_check_for_ack_pf; 436 mbx->ops.check_for_rst = igb_check_for_rst_pf;
437 mbx->ops.check_for_rst = igb_check_for_rst_pf; 437
438 438 mbx->stats.msgs_tx = 0;
439 mbx->stats.msgs_tx = 0; 439 mbx->stats.msgs_rx = 0;
440 mbx->stats.msgs_rx = 0; 440 mbx->stats.reqs = 0;
441 mbx->stats.reqs = 0; 441 mbx->stats.acks = 0;
442 mbx->stats.acks = 0; 442 mbx->stats.rsts = 0;
443 mbx->stats.rsts = 0;
444 }
445 443
446 return 0; 444 return 0;
447} 445}
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 8ac83c5190d..3a6f8471aea 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -106,6 +106,10 @@
106 106
107#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 107#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
108 108
109/* TX Rate Limit Registers */
110#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
111#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
112
109/* Split and Replication RX Control - RW */ 113/* Split and Replication RX Control - RW */
110#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ 114#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
111/* 115/*
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 92a4ef09e55..bbc5ebfe254 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -77,6 +77,7 @@ struct vf_data_storage {
77 unsigned long last_nack; 77 unsigned long last_nack;
78 u16 pf_vlan; /* When set, guest VLAN config not allowed. */ 78 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
79 u16 pf_qos; 79 u16 pf_qos;
80 u16 tx_rate;
80}; 81};
81 82
82#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ 83#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -323,6 +324,7 @@ struct igb_adapter {
323 u16 rx_ring_count; 324 u16 rx_ring_count;
324 unsigned int vfs_allocated_count; 325 unsigned int vfs_allocated_count;
325 struct vf_data_storage *vf_data; 326 struct vf_data_storage *vf_data;
327 int vf_rate_link_speed;
326 u32 rss_queues; 328 u32 rss_queues;
327 u32 wvbr; 329 u32 wvbr;
328}; 330};
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index a70e16bcfa7..61f7849cb5a 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -727,8 +727,9 @@ static void igb_get_drvinfo(struct net_device *netdev,
727 char firmware_version[32]; 727 char firmware_version[32];
728 u16 eeprom_data; 728 u16 eeprom_data;
729 729
730 strncpy(drvinfo->driver, igb_driver_name, 32); 730 strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
731 strncpy(drvinfo->version, igb_driver_version, 32); 731 strncpy(drvinfo->version, igb_driver_version,
732 sizeof(drvinfo->version) - 1);
732 733
733 /* EEPROM image version # is reported as firmware version # for 734 /* EEPROM image version # is reported as firmware version # for
734 * 82575 controllers */ 735 * 82575 controllers */
@@ -738,8 +739,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
738 (eeprom_data & 0x0FF0) >> 4, 739 (eeprom_data & 0x0FF0) >> 4,
739 eeprom_data & 0x000F); 740 eeprom_data & 0x000F);
740 741
741 strncpy(drvinfo->fw_version, firmware_version, 32); 742 strncpy(drvinfo->fw_version, firmware_version,
742 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 743 sizeof(drvinfo->fw_version) - 1);
744 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
745 sizeof(drvinfo->bus_info) - 1);
743 drvinfo->n_stats = IGB_STATS_LEN; 746 drvinfo->n_stats = IGB_STATS_LEN;
744 drvinfo->testinfo_len = IGB_TEST_LEN; 747 drvinfo->testinfo_len = IGB_TEST_LEN;
745 drvinfo->regdump_len = igb_get_regs_len(netdev); 748 drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -1070,7 +1073,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1070 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1073 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1071 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 1074 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
1072 wr32(reg, (_test[pat] & write)); 1075 wr32(reg, (_test[pat] & write));
1073 val = rd32(reg); 1076 val = rd32(reg) & mask;
1074 if (val != (_test[pat] & write & mask)) { 1077 if (val != (_test[pat] & write & mask)) {
1075 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 1078 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
1076 "failed: got 0x%08X expected 0x%08X\n", 1079 "failed: got 0x%08X expected 0x%08X\n",
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b7513..eef380af053 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -50,12 +50,12 @@
50#endif 50#endif
51#include "igb.h" 51#include "igb.h"
52 52
53#define DRV_VERSION "2.1.0-k2" 53#define DRV_VERSION "2.4.13-k2"
54char igb_driver_name[] = "igb"; 54char igb_driver_name[] = "igb";
55char igb_driver_version[] = DRV_VERSION; 55char igb_driver_version[] = DRV_VERSION;
56static const char igb_driver_string[] = 56static const char igb_driver_string[] =
57 "Intel(R) Gigabit Ethernet Network Driver"; 57 "Intel(R) Gigabit Ethernet Network Driver";
58static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation."; 58static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
59 59
60static const struct e1000_info *igb_info_tbl[] = { 60static const struct e1000_info *igb_info_tbl[] = {
61 [board_82575] = &e1000_82575_info, 61 [board_82575] = &e1000_82575_info,
@@ -68,6 +68,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, 68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, 74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -149,6 +150,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
149static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); 150static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
150static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, 151static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
151 struct ifla_vf_info *ivi); 152 struct ifla_vf_info *ivi);
153static void igb_check_vf_rate_limit(struct igb_adapter *);
152 154
153#ifdef CONFIG_PM 155#ifdef CONFIG_PM
154static int igb_suspend(struct pci_dev *, pm_message_t); 156static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -2286,9 +2288,19 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2286 2288
2287 spin_lock_init(&adapter->stats64_lock); 2289 spin_lock_init(&adapter->stats64_lock);
2288#ifdef CONFIG_PCI_IOV 2290#ifdef CONFIG_PCI_IOV
2289 if (hw->mac.type == e1000_82576) 2291 switch (hw->mac.type) {
2290 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; 2292 case e1000_82576:
2291 2293 case e1000_i350:
2294 if (max_vfs > 7) {
2295 dev_warn(&pdev->dev,
2296 "Maximum of 7 VFs per PF, using max\n");
2297 adapter->vfs_allocated_count = 7;
2298 } else
2299 adapter->vfs_allocated_count = max_vfs;
2300 break;
2301 default:
2302 break;
2303 }
2292#endif /* CONFIG_PCI_IOV */ 2304#endif /* CONFIG_PCI_IOV */
2293 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 2305 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2294 2306
@@ -3505,6 +3517,7 @@ static void igb_watchdog_task(struct work_struct *work)
3505 netif_carrier_on(netdev); 3517 netif_carrier_on(netdev);
3506 3518
3507 igb_ping_all_vfs(adapter); 3519 igb_ping_all_vfs(adapter);
3520 igb_check_vf_rate_limit(adapter);
3508 3521
3509 /* link state has changed, schedule phy info update */ 3522 /* link state has changed, schedule phy info update */
3510 if (!test_bit(__IGB_DOWN, &adapter->state)) 3523 if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -6593,9 +6606,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6593 return igb_set_vf_mac(adapter, vf, mac); 6606 return igb_set_vf_mac(adapter, vf, mac);
6594} 6607}
6595 6608
6609static int igb_link_mbps(int internal_link_speed)
6610{
6611 switch (internal_link_speed) {
6612 case SPEED_100:
6613 return 100;
6614 case SPEED_1000:
6615 return 1000;
6616 default:
6617 return 0;
6618 }
6619}
6620
6621static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6622 int link_speed)
6623{
6624 int rf_dec, rf_int;
6625 u32 bcnrc_val;
6626
6627 if (tx_rate != 0) {
6628 /* Calculate the rate factor values to set */
6629 rf_int = link_speed / tx_rate;
6630 rf_dec = (link_speed - (rf_int * tx_rate));
6631 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
6632
6633 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6634 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6635 E1000_RTTBCNRC_RF_INT_MASK);
6636 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6637 } else {
6638 bcnrc_val = 0;
6639 }
6640
6641 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6642 wr32(E1000_RTTBCNRC, bcnrc_val);
6643}
6644
6645static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6646{
6647 int actual_link_speed, i;
6648 bool reset_rate = false;
6649
6650 /* VF TX rate limit was not set or not supported */
6651 if ((adapter->vf_rate_link_speed == 0) ||
6652 (adapter->hw.mac.type != e1000_82576))
6653 return;
6654
6655 actual_link_speed = igb_link_mbps(adapter->link_speed);
6656 if (actual_link_speed != adapter->vf_rate_link_speed) {
6657 reset_rate = true;
6658 adapter->vf_rate_link_speed = 0;
6659 dev_info(&adapter->pdev->dev,
6660 "Link speed has been changed. VF Transmit "
6661 "rate is disabled\n");
6662 }
6663
6664 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6665 if (reset_rate)
6666 adapter->vf_data[i].tx_rate = 0;
6667
6668 igb_set_vf_rate_limit(&adapter->hw, i,
6669 adapter->vf_data[i].tx_rate,
6670 actual_link_speed);
6671 }
6672}
6673
6596static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) 6674static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6597{ 6675{
6598 return -EOPNOTSUPP; 6676 struct igb_adapter *adapter = netdev_priv(netdev);
6677 struct e1000_hw *hw = &adapter->hw;
6678 int actual_link_speed;
6679
6680 if (hw->mac.type != e1000_82576)
6681 return -EOPNOTSUPP;
6682
6683 actual_link_speed = igb_link_mbps(adapter->link_speed);
6684 if ((vf >= adapter->vfs_allocated_count) ||
6685 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6686 (tx_rate < 0) || (tx_rate > actual_link_speed))
6687 return -EINVAL;
6688
6689 adapter->vf_rate_link_speed = actual_link_speed;
6690 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6691 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6692
6693 return 0;
6599} 6694}
6600 6695
6601static int igb_ndo_get_vf_config(struct net_device *netdev, 6696static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -6606,7 +6701,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
6606 return -EINVAL; 6701 return -EINVAL;
6607 ivi->vf = vf; 6702 ivi->vf = vf;
6608 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); 6703 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
6609 ivi->tx_rate = 0; 6704 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
6610 ivi->vlan = adapter->vf_data[vf].pf_vlan; 6705 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6611 ivi->qos = adapter->vf_data[vf].pf_qos; 6706 ivi->qos = adapter->vf_data[vf].pf_qos;
6612 return 0; 6707 return 0;
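
With igb_ndo_set_vf_bw() now implemented, the cap for each VF is kept in vf_data[].tx_rate, re-applied from the watchdog via igb_check_vf_rate_limit(), and dropped if the link speed changes. A hedged sketch of how another kernel caller would reach this hook through the generic netdev op (the op name ndo_set_vf_tx_rate and the Mb/s unit are assumptions for this kernel generation; userspace normally gets here via the rtnetlink VF attributes, e.g. something like "ip link set <dev> vf 0 rate 300"):

static int cap_vf0_at_300mbps(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_set_vf_tx_rate)
		return -EOPNOTSUPP;

	/* callers are expected to hold the RTNL lock */
	return ops->ndo_set_vf_tx_rate(dev, 0 /* vf */, 300 /* Mb/s */);
}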
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index ed6e3d91024..1d943aa7c7a 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev,
201 struct igbvf_adapter *adapter = netdev_priv(netdev); 201 struct igbvf_adapter *adapter = netdev_priv(netdev);
202 struct e1000_hw *hw = &adapter->hw; 202 struct e1000_hw *hw = &adapter->hw;
203 u32 *regs_buff = p; 203 u32 *regs_buff = p;
204 u8 revision_id;
205 204
206 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); 205 memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
207 206
208 pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); 207 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
209 208 adapter->pdev->device;
210 regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;
211 209
212 regs_buff[0] = er32(CTRL); 210 regs_buff[0] = er32(CTRL);
213 regs_buff[1] = er32(STATUS); 211 regs_buff[1] = er32(STATUS);
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 990c329e6c3..d5dad5d607d 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -201,9 +201,6 @@ struct igbvf_adapter {
201 unsigned int restart_queue; 201 unsigned int restart_queue;
202 u32 txd_cmd; 202 u32 txd_cmd;
203 203
204 bool detect_tx_hung;
205 u8 tx_timeout_factor;
206
207 u32 tx_int_delay; 204 u32 tx_int_delay;
208 u32 tx_abs_int_delay; 205 u32 tx_abs_int_delay;
209 206
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 6352c8158e6..6ccc32fd733 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
396 buffer_info->time_stamp = 0; 396 buffer_info->time_stamp = 0;
397} 397}
398 398
399static void igbvf_print_tx_hang(struct igbvf_adapter *adapter)
400{
401 struct igbvf_ring *tx_ring = adapter->tx_ring;
402 unsigned int i = tx_ring->next_to_clean;
403 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
404 union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
405
406 /* detected Tx unit hang */
407 dev_err(&adapter->pdev->dev,
408 "Detected Tx Unit Hang:\n"
409 " TDH <%x>\n"
410 " TDT <%x>\n"
411 " next_to_use <%x>\n"
412 " next_to_clean <%x>\n"
413 "buffer_info[next_to_clean]:\n"
414 " time_stamp <%lx>\n"
415 " next_to_watch <%x>\n"
416 " jiffies <%lx>\n"
417 " next_to_watch.status <%x>\n",
418 readl(adapter->hw.hw_addr + tx_ring->head),
419 readl(adapter->hw.hw_addr + tx_ring->tail),
420 tx_ring->next_to_use,
421 tx_ring->next_to_clean,
422 tx_ring->buffer_info[eop].time_stamp,
423 eop,
424 jiffies,
425 eop_desc->wb.status);
426}
427
428/** 399/**
429 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) 400 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
430 * @adapter: board private structure 401 * @adapter: board private structure
@@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
771static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) 742static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
772{ 743{
773 struct igbvf_adapter *adapter = tx_ring->adapter; 744 struct igbvf_adapter *adapter = tx_ring->adapter;
774 struct e1000_hw *hw = &adapter->hw;
775 struct net_device *netdev = adapter->netdev; 745 struct net_device *netdev = adapter->netdev;
776 struct igbvf_buffer *buffer_info; 746 struct igbvf_buffer *buffer_info;
777 struct sk_buff *skb; 747 struct sk_buff *skb;
@@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
832 } 802 }
833 } 803 }
834 804
835 if (adapter->detect_tx_hung) {
836 /* Detect a transmit hang in hardware, this serializes the
837 * check with the clearing of time_stamp and movement of i */
838 adapter->detect_tx_hung = false;
839 if (tx_ring->buffer_info[i].time_stamp &&
840 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
841 (adapter->tx_timeout_factor * HZ)) &&
842 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
843
844 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
845 /* detected Tx unit hang */
846 igbvf_print_tx_hang(adapter);
847
848 netif_stop_queue(netdev);
849 }
850 }
851 adapter->net_stats.tx_bytes += total_bytes; 805 adapter->net_stats.tx_bytes += total_bytes;
852 adapter->net_stats.tx_packets += total_packets; 806 adapter->net_stats.tx_packets += total_packets;
853 return count < tx_ring->count; 807 return count < tx_ring->count;
@@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1863 &adapter->link_duplex); 1817 &adapter->link_duplex);
1864 igbvf_print_link_info(adapter); 1818 igbvf_print_link_info(adapter);
1865 1819
1866 /* adjust timeout factor according to speed/duplex */
1867 adapter->tx_timeout_factor = 1;
1868 switch (adapter->link_speed) {
1869 case SPEED_10:
1870 adapter->tx_timeout_factor = 16;
1871 break;
1872 case SPEED_100:
1873 /* maybe add some timeout factor ? */
1874 break;
1875 }
1876
1877 netif_carrier_on(netdev); 1820 netif_carrier_on(netdev);
1878 netif_wake_queue(netdev); 1821 netif_wake_queue(netdev);
1879 } 1822 }
@@ -1907,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1907 /* Cause software interrupt to ensure Rx ring is cleaned */ 1850 /* Cause software interrupt to ensure Rx ring is cleaned */
1908 ew32(EICS, adapter->rx_ring->eims_value); 1851 ew32(EICS, adapter->rx_ring->eims_value);
1909 1852
1910 /* Force detection of hung controller every watchdog period */
1911 adapter->detect_tx_hung = 1;
1912
1913 /* Reset the timer */ 1853 /* Reset the timer */
1914 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1854 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1915 mod_timer(&adapter->watchdog_timer, 1855 mod_timer(&adapter->watchdog_timer,
@@ -2699,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2699 hw->device_id = pdev->device; 2639 hw->device_id = pdev->device;
2700 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2640 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2701 hw->subsystem_device_id = pdev->subsystem_device; 2641 hw->subsystem_device_id = pdev->subsystem_device;
2702 2642 hw->revision_id = pdev->revision;
2703 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2704 2643
2705 err = -EIO; 2644 err = -EIO;
2706 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), 2645 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index aa93655c3aa..a5b0f0e194b 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev)
2025 2025
2026 if (phyaddr != 0x1f) { 2026 if (phyaddr != 0x1f) {
2027 u16 mii_phyctrl, mii_1000cr; 2027 u16 mii_phyctrl, mii_1000cr;
2028 u8 revisionid = 0;
2029 2028
2030 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); 2029 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2031 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | 2030 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
@@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev)
2035 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); 2034 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2036 2035
2037 /* Set default phyparam */ 2036 /* Set default phyparam */
2038 pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); 2037 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2039 ipg_set_phy_default_param(revisionid, dev, phyaddr);
2040 2038
2041 /* Reset PHY */ 2039 /* Reset PHY */
2042 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; 2040 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3b8c9246361..b60b81bc2b1 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -334,6 +334,10 @@ struct ixgbe_adapter {
334 u16 bd_number; 334 u16 bd_number;
335 struct work_struct reset_task; 335 struct work_struct reset_task;
336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
337
338 /* DCB parameters */
339 struct ieee_pfc *ixgbe_ieee_pfc;
340 struct ieee_ets *ixgbe_ieee_ets;
337 struct ixgbe_dcb_config dcb_cfg; 341 struct ixgbe_dcb_config dcb_cfg;
338 struct ixgbe_dcb_config temp_dcb_cfg; 342 struct ixgbe_dcb_config temp_dcb_cfg;
339 u8 dcb_set_bitmap; 343 u8 dcb_set_bitmap;
@@ -521,7 +525,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
521extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); 525extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
522extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 526extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
523extern int ethtool_ioctl(struct ifreq *ifr); 527extern int ethtool_ioctl(struct ifreq *ifr);
524extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
525extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 528extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
526extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 529extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
527extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 530extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index d0f1d9d2c41..fc41329399b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -627,7 +627,6 @@ out:
627 return 0; 627 return 0;
628} 628}
629 629
630
631/** 630/**
632 * ixgbe_setup_mac_link_82598 - Set MAC link speed 631 * ixgbe_setup_mac_link_82598 - Set MAC link speed
633 * @hw: pointer to hardware structure 632 * @hw: pointer to hardware structure
@@ -698,7 +697,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
698 /* Setup the PHY according to input speed */ 697 /* Setup the PHY according to input speed */
699 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 698 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
700 autoneg_wait_to_complete); 699 autoneg_wait_to_complete);
701
702 /* Set up MAC */ 700 /* Set up MAC */
703 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); 701 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
704 702
@@ -770,7 +768,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
770 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) 768 else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
771 goto no_phy_reset; 769 goto no_phy_reset;
772 770
773
774 hw->phy.ops.reset(hw); 771 hw->phy.ops.reset(hw);
775 } 772 }
776 773
@@ -779,12 +776,9 @@ no_phy_reset:
779 * Prevent the PCI-E bus from hanging by disabling PCI-E master 776 * Prevent the PCI-E bus from hanging by disabling PCI-E master

780 * access and verify no pending requests before reset 777 * access and verify no pending requests before reset
781 */ 778 */
782 status = ixgbe_disable_pcie_master(hw); 779 ixgbe_disable_pcie_master(hw);
783 if (status != 0) {
784 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
785 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
786 }
787 780
781mac_reset_top:
788 /* 782 /*
789 * Issue global reset to the MAC. This needs to be a SW reset. 783 * Issue global reset to the MAC. This needs to be a SW reset.
790 * If link reset is used, it might reset the MAC when mng is using it 784 * If link reset is used, it might reset the MAC when mng is using it
@@ -805,6 +799,19 @@ no_phy_reset:
805 hw_dbg(hw, "Reset polling failed to complete.\n"); 799 hw_dbg(hw, "Reset polling failed to complete.\n");
806 } 800 }
807 801
802 /*
803 * Double resets are required for recovery from certain error
804 * conditions. Between resets, it is necessary to stall to allow time
805 * for any pending HW events to complete. We use 1usec since that is
806 * what is needed for ixgbe_disable_pcie_master(). The second reset
807 * then clears out any effects of those events.
808 */
809 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
810 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
811 udelay(1);
812 goto mac_reset_top;
813 }
814
808 msleep(50); 815 msleep(50);
809 816
810 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); 817 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@@ -824,15 +831,15 @@ no_phy_reset:
824 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); 831 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
825 } 832 }
826 833
834 /* Store the permanent mac address */
835 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
836
827 /* 837 /*
828 * Store MAC address from RAR0, clear receive address registers, and 838 * Store MAC address from RAR0, clear receive address registers, and
829 * clear the multicast table 839 * clear the multicast table
830 */ 840 */
831 hw->mac.ops.init_rx_addrs(hw); 841 hw->mac.ops.init_rx_addrs(hw);
832 842
833 /* Store the permanent mac address */
834 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
835
836reset_hw_out: 843reset_hw_out:
837 if (phy_status) 844 if (phy_status)
838 status = phy_status; 845 status = phy_status;
@@ -849,6 +856,13 @@ reset_hw_out:
849static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 856static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
850{ 857{
851 u32 rar_high; 858 u32 rar_high;
859 u32 rar_entries = hw->mac.num_rar_entries;
860
861 /* Make sure we are using a valid rar index range */
862 if (rar >= rar_entries) {
863 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
864 return IXGBE_ERR_INVALID_ARGUMENT;
865 }
852 866
853 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 867 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
854 rar_high &= ~IXGBE_RAH_VIND_MASK; 868 rar_high &= ~IXGBE_RAH_VIND_MASK;
@@ -868,14 +882,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
868 u32 rar_high; 882 u32 rar_high;
869 u32 rar_entries = hw->mac.num_rar_entries; 883 u32 rar_entries = hw->mac.num_rar_entries;
870 884
871 if (rar < rar_entries) { 885
872 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 886 /* Make sure we are using a valid rar index range */
873 if (rar_high & IXGBE_RAH_VIND_MASK) { 887 if (rar >= rar_entries) {
874 rar_high &= ~IXGBE_RAH_VIND_MASK;
875 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
876 }
877 } else {
878 hw_dbg(hw, "RAR index %d is out of range.\n", rar); 888 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
889 return IXGBE_ERR_INVALID_ARGUMENT;
890 }
891
892 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
893 if (rar_high & IXGBE_RAH_VIND_MASK) {
894 rar_high &= ~IXGBE_RAH_VIND_MASK;
895 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
879 } 896 }
880 897
881 return 0; 898 return 0;
@@ -994,13 +1011,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
994} 1011}
995 1012
996/** 1013/**
997 * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module 1014 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
998 * over I2C interface through an intermediate phy.
999 * @hw: pointer to hardware structure 1015 * @hw: pointer to hardware structure
1000 * @byte_offset: EEPROM byte offset to read 1016 * @byte_offset: EEPROM byte offset to read
1001 * @eeprom_data: value read 1017 * @eeprom_data: value read
1002 * 1018 *
1003 * Performs byte read operation to SFP module's EEPROM over I2C interface. 1019 * Performs an 8 bit read operation to SFP module's EEPROM over I2C interface.
1004 **/ 1020 **/
1005static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 1021static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1006 u8 *eeprom_data) 1022 u8 *eeprom_data)
@@ -1179,13 +1195,14 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1179 .set_vmdq = &ixgbe_set_vmdq_82598, 1195 .set_vmdq = &ixgbe_set_vmdq_82598,
1180 .clear_vmdq = &ixgbe_clear_vmdq_82598, 1196 .clear_vmdq = &ixgbe_clear_vmdq_82598,
1181 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 1197 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1182 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1183 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 1198 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
1184 .enable_mc = &ixgbe_enable_mc_generic, 1199 .enable_mc = &ixgbe_enable_mc_generic,
1185 .disable_mc = &ixgbe_disable_mc_generic, 1200 .disable_mc = &ixgbe_disable_mc_generic,
1186 .clear_vfta = &ixgbe_clear_vfta_82598, 1201 .clear_vfta = &ixgbe_clear_vfta_82598,
1187 .set_vfta = &ixgbe_set_vfta_82598, 1202 .set_vfta = &ixgbe_set_vfta_82598,
1188 .fc_enable = &ixgbe_fc_enable_82598, 1203 .fc_enable = &ixgbe_fc_enable_82598,
1204 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1205 .release_swfw_sync = &ixgbe_release_swfw_sync,
1189}; 1206};
1190 1207
1191static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1208static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
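
Two behavioural changes in the 82598 reset path above are worth noting: a PCI-E master disable failure no longer aborts the reset, and a second MAC reset is issued when IXGBE_FLAGS_DOUBLE_RESET_REQUIRED is set. The retry is bounded because the flag is cleared before jumping back; a stripped-down sketch of that control flow (the chip struct and helpers are illustrative, not the ixgbe ones):

static void chip_reset(struct chip *hw)
{
mac_reset_top:
	issue_soft_reset(hw);			/* hypothetical helper */
	poll_until_reset_clears(hw);		/* hypothetical helper */

	if (hw->flags & CHIP_FLAG_DOUBLE_RESET_REQUIRED) {
		hw->flags &= ~CHIP_FLAG_DOUBLE_RESET_REQUIRED;
		udelay(1);			/* let pending HW events drain */
		goto mac_reset_top;		/* runs at most once: flag is now clear */
	}
}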
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index a21f5817685..5ef968a10d4 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -112,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
112 goto setup_sfp_out; 112 goto setup_sfp_out;
113 113
114 /* PHY config will finish before releasing the semaphore */ 114 /* PHY config will finish before releasing the semaphore */
115 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 115 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
116 IXGBE_GSSR_MAC_CSR_SM);
116 if (ret_val != 0) { 117 if (ret_val != 0) {
117 ret_val = IXGBE_ERR_SWFW_SYNC; 118 ret_val = IXGBE_ERR_SWFW_SYNC;
118 goto setup_sfp_out; 119 goto setup_sfp_out;
@@ -329,11 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
329 enum ixgbe_media_type media_type; 330 enum ixgbe_media_type media_type;
330 331
331 /* Detect if there is a copper PHY attached. */ 332 /* Detect if there is a copper PHY attached. */
332 if (hw->phy.type == ixgbe_phy_cu_unknown || 333 switch (hw->phy.type) {
333 hw->phy.type == ixgbe_phy_tn || 334 case ixgbe_phy_cu_unknown:
334 hw->phy.type == ixgbe_phy_aq) { 335 case ixgbe_phy_tn:
336 case ixgbe_phy_aq:
335 media_type = ixgbe_media_type_copper; 337 media_type = ixgbe_media_type_copper;
336 goto out; 338 goto out;
339 default:
340 break;
337 } 341 }
338 342
339 switch (hw->device_id) { 343 switch (hw->device_id) {
@@ -354,6 +358,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
354 case IXGBE_DEV_ID_82599_CX4: 358 case IXGBE_DEV_ID_82599_CX4:
355 media_type = ixgbe_media_type_cx4; 359 media_type = ixgbe_media_type_cx4;
356 break; 360 break;
361 case IXGBE_DEV_ID_82599_T3_LOM:
362 media_type = ixgbe_media_type_copper;
363 break;
357 default: 364 default:
358 media_type = ixgbe_media_type_unknown; 365 media_type = ixgbe_media_type_unknown;
359 break; 366 break;
@@ -411,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
411 return status; 418 return status;
412} 419}
413 420
414 /** 421/**
415 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 422 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
416 * @hw: pointer to hardware structure 423 * @hw: pointer to hardware structure
417 * 424 *
418 * The base drivers may require better control over SFP+ module 425 * The base drivers may require better control over SFP+ module
419 * PHY states. This includes selectively shutting down the Tx 426 * PHY states. This includes selectively shutting down the Tx
420 * laser on the PHY, effectively halting physical link. 427 * laser on the PHY, effectively halting physical link.
421 **/ 428 **/
422static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 429static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
423{ 430{
424 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 431 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
@@ -536,7 +543,6 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
536 * Section 73.10.2, we may have to wait up to 500ms if KR is 543 * Section 73.10.2, we may have to wait up to 500ms if KR is
537 * attempted. 82599 uses the same timing for 10g SFI. 544 * attempted. 82599 uses the same timing for 10g SFI.
538 */ 545 */
539
540 for (i = 0; i < 5; i++) { 546 for (i = 0; i < 5; i++) {
541 /* Wait for the link partner to also set speed */ 547 /* Wait for the link partner to also set speed */
542 msleep(100); 548 msleep(100);
@@ -761,7 +767,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
761 else 767 else
762 orig_autoc = autoc; 768 orig_autoc = autoc;
763 769
764
765 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 770 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
766 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 771 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
767 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 772 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
@@ -898,12 +903,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
898 * Prevent the PCI-E bus from hanging by disabling PCI-E master 903 * Prevent the PCI-E bus from hanging by disabling PCI-E master
899 * access and verify no pending requests before reset 904 * access and verify no pending requests before reset
900 */ 905 */
901 status = ixgbe_disable_pcie_master(hw); 906 ixgbe_disable_pcie_master(hw);
902 if (status != 0) {
903 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
904 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
905 }
906 907
908mac_reset_top:
907 /* 909 /*
908 * Issue global reset to the MAC. This needs to be a SW reset. 910 * Issue global reset to the MAC. This needs to be a SW reset.
909 * If link reset is used, it might reset the MAC when mng is using it 911 * If link reset is used, it might reset the MAC when mng is using it
@@ -924,6 +926,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
924 hw_dbg(hw, "Reset polling failed to complete.\n"); 926 hw_dbg(hw, "Reset polling failed to complete.\n");
925 } 927 }
926 928
929 /*
930 * Double resets are required for recovery from certain error
931 * conditions. Between resets, it is necessary to stall to allow time
932 * for any pending HW events to complete. We use 1usec since that is
933 * what is needed for ixgbe_disable_pcie_master(). The second reset
934 * then clears out any effects of those events.
935 */
936 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
937 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
938 udelay(1);
939 goto mac_reset_top;
940 }
941
927 msleep(50); 942 msleep(50);
928 943
929 /* 944 /*
@@ -951,6 +966,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
951 } 966 }
952 } 967 }
953 968
969 /* Store the permanent mac address */
970 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
971
954 /* 972 /*
955 * Store MAC address from RAR0, clear receive address registers, and 973 * Store MAC address from RAR0, clear receive address registers, and
956 * clear the multicast table. Also reset num_rar_entries to 128, 974 * clear the multicast table. Also reset num_rar_entries to 128,
@@ -959,9 +977,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
959 hw->mac.num_rar_entries = 128; 977 hw->mac.num_rar_entries = 128;
960 hw->mac.ops.init_rx_addrs(hw); 978 hw->mac.ops.init_rx_addrs(hw);
961 979
962 /* Store the permanent mac address */
963 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
964
965 /* Store the permanent SAN mac address */ 980 /* Store the permanent SAN mac address */
966 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 981 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
967 982
@@ -1733,13 +1748,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1733 * @hw: pointer to hardware structure 1748 * @hw: pointer to hardware structure
1734 * 1749 *
1735 * Determines the physical layer module found on the current adapter. 1750 * Determines the physical layer module found on the current adapter.
1751 * If PHY already detected, maintains current PHY type in hw struct,
1752 * otherwise executes the PHY detection routine.
1736 **/ 1753 **/
1737static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 1754s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1738{ 1755{
1739 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 1756 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1757
1758 /* Detect PHY if not unknown - returns success if already detected. */
1740 status = ixgbe_identify_phy_generic(hw); 1759 status = ixgbe_identify_phy_generic(hw);
1741 if (status != 0) 1760 if (status != 0) {
1742 status = ixgbe_identify_sfp_module_generic(hw); 1761 /* 82599 10GBASE-T requires an external PHY */
1762 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1763 goto out;
1764 else
1765 status = ixgbe_identify_sfp_module_generic(hw);
1766 }
1767
1768 /* Set PHY type none if no PHY detected */
1769 if (hw->phy.type == ixgbe_phy_unknown) {
1770 hw->phy.type = ixgbe_phy_none;
1771 status = 0;
1772 }
1773
1774 /* Return error if SFP module has been detected but is not supported */
1775 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1776 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1777
1778out:
1743 return status; 1779 return status;
1744} 1780}
1745 1781
@@ -1763,11 +1799,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1763 1799
1764 hw->phy.ops.identify(hw); 1800 hw->phy.ops.identify(hw);
1765 1801
1766 if (hw->phy.type == ixgbe_phy_tn || 1802 switch (hw->phy.type) {
1767 hw->phy.type == ixgbe_phy_aq || 1803 case ixgbe_phy_tn:
1768 hw->phy.type == ixgbe_phy_cu_unknown) { 1804 case ixgbe_phy_aq:
1805 case ixgbe_phy_cu_unknown:
1769 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1806 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
1770 &ext_ability); 1807 &ext_ability);
1771 if (ext_ability & MDIO_PMA_EXTABLE_10GBT) 1808 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
1772 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1809 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1773 if (ext_ability & MDIO_PMA_EXTABLE_1000BT) 1810 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
@@ -1775,6 +1812,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1775 if (ext_ability & MDIO_PMA_EXTABLE_100BTX) 1812 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
1776 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 1813 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1777 goto out; 1814 goto out;
1815 default:
1816 break;
1778 } 1817 }
1779 1818
1780 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 1819 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
@@ -1886,6 +1925,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1886 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 1925 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
1887 break; 1926 break;
1888 else 1927 else
1928 /* Use interrupt-safe sleep just in case */
1889 udelay(10); 1929 udelay(10);
1890 } 1930 }
1891 1931
@@ -1995,7 +2035,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
1995 .set_vmdq = &ixgbe_set_vmdq_generic, 2035 .set_vmdq = &ixgbe_set_vmdq_generic,
1996 .clear_vmdq = &ixgbe_clear_vmdq_generic, 2036 .clear_vmdq = &ixgbe_clear_vmdq_generic,
1997 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 2037 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
1998 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
1999 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 2038 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
2000 .enable_mc = &ixgbe_enable_mc_generic, 2039 .enable_mc = &ixgbe_enable_mc_generic,
2001 .disable_mc = &ixgbe_disable_mc_generic, 2040 .disable_mc = &ixgbe_disable_mc_generic,
@@ -2006,6 +2045,9 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2006 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2045 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2007 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 2046 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
2008 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2047 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2048 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2049 .release_swfw_sync = &ixgbe_release_swfw_sync,
2050
2009}; 2051};
2010 2052
2011static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2053static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
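
The 82599 changes route software/firmware semaphore handling through mac.ops (acquire_swfw_sync/release_swfw_sync, added to both the 82598 and 82599 ops tables above) instead of calling the generic helpers directly, so later MAC generations can supply their own implementation. A sketch of the calling pattern, with the error handling used in ixgbe_setup_sfp_modules_82599() (the function name here is illustrative):

static s32 do_csr_work_locked(struct ixgbe_hw *hw)
{
	s32 ret;

	ret = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	if (ret)
		return IXGBE_ERR_SWFW_SYNC;

	/* ... access the MAC/PHY CSRs protected by the semaphore ... */

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
	return 0;
}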
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index ebbda7d1525..a7fb2e00f76 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -46,10 +46,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 48
49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
53static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 50static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
54 51
55/** 52/**
@@ -454,8 +451,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
454 * Prevent the PCI-E bus from hanging by disabling PCI-E master 451 * Prevent the PCI-E bus from hanging by disabling PCI-E master
455 * access and verify no pending requests 452 * access and verify no pending requests
456 */ 453 */
457 if (ixgbe_disable_pcie_master(hw) != 0) 454 ixgbe_disable_pcie_master(hw);
458 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
459 455
460 return 0; 456 return 0;
461} 457}
@@ -603,7 +599,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
603 ixgbe_shift_out_eeprom_bits(hw, data, 16); 599 ixgbe_shift_out_eeprom_bits(hw, data, 16);
604 ixgbe_standby_eeprom(hw); 600 ixgbe_standby_eeprom(hw);
605 601
606 msleep(hw->eeprom.semaphore_delay);
607 /* Done with writing - release the EEPROM */ 602 /* Done with writing - release the EEPROM */
608 ixgbe_release_eeprom(hw); 603 ixgbe_release_eeprom(hw);
609 } 604 }
@@ -747,10 +742,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
747static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 742static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
748{ 743{
749 s32 status = 0; 744 s32 status = 0;
750 u32 eec = 0; 745 u32 eec;
751 u32 i; 746 u32 i;
752 747
753 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 748 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
754 status = IXGBE_ERR_SWFW_SYNC; 749 status = IXGBE_ERR_SWFW_SYNC;
755 750
756 if (status == 0) { 751 if (status == 0) {
@@ -773,18 +768,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
773 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 768 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
774 hw_dbg(hw, "Could not acquire EEPROM grant\n"); 769 hw_dbg(hw, "Could not acquire EEPROM grant\n");
775 770
776 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 771 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
777 status = IXGBE_ERR_EEPROM; 772 status = IXGBE_ERR_EEPROM;
778 } 773 }
779 }
780 774
781 /* Setup EEPROM for Read/Write */ 775 /* Setup EEPROM for Read/Write */
782 if (status == 0) { 776 if (status == 0) {
783 /* Clear CS and SK */ 777 /* Clear CS and SK */
784 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 778 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
785 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 779 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
786 IXGBE_WRITE_FLUSH(hw); 780 IXGBE_WRITE_FLUSH(hw);
787 udelay(1); 781 udelay(1);
782 }
788 } 783 }
789 return status; 784 return status;
790} 785}
@@ -798,13 +793,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
798static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) 793static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
799{ 794{
800 s32 status = IXGBE_ERR_EEPROM; 795 s32 status = IXGBE_ERR_EEPROM;
801 u32 timeout; 796 u32 timeout = 2000;
802 u32 i; 797 u32 i;
803 u32 swsm; 798 u32 swsm;
804 799
805 /* Set timeout value based on size of EEPROM */
806 timeout = hw->eeprom.word_size + 1;
807
808 /* Get SMBI software semaphore between device drivers first */ 800 /* Get SMBI software semaphore between device drivers first */
809 for (i = 0; i < timeout; i++) { 801 for (i = 0; i < timeout; i++) {
810 /* 802 /*
@@ -816,7 +808,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
816 status = 0; 808 status = 0;
817 break; 809 break;
818 } 810 }
819 msleep(1); 811 udelay(50);
820 } 812 }
821 813
822 /* Now get the semaphore between SW/FW through the SWESMBI bit */ 814 /* Now get the semaphore between SW/FW through the SWESMBI bit */
@@ -844,11 +836,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
844 * was not granted because we don't have access to the EEPROM 836 * was not granted because we don't have access to the EEPROM
845 */ 837 */
846 if (i >= timeout) { 838 if (i >= timeout) {
847 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 839 hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
848 "not granted.\n"); 840 "not granted.\n");
849 ixgbe_release_eeprom_semaphore(hw); 841 ixgbe_release_eeprom_semaphore(hw);
850 status = IXGBE_ERR_EEPROM; 842 status = IXGBE_ERR_EEPROM;
851 } 843 }
844 } else {
845 hw_dbg(hw, "Software semaphore SMBI between device drivers "
846 "not granted.\n");
852 } 847 }
853 848
854 return status; 849 return status;
@@ -1081,10 +1076,13 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1081 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 1076 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1082 1077
1083 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 1078 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1079
1080 /* Delay before attempt to obtain semaphore again to allow FW access */
1081 msleep(hw->eeprom.semaphore_delay);
1084} 1082}
1085 1083
1086/** 1084/**
1087 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1085 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1088 * @hw: pointer to hardware structure 1086 * @hw: pointer to hardware structure
1089 **/ 1087 **/
1090u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) 1088u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
@@ -1190,7 +1188,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1190 if (status == 0) { 1188 if (status == 0) {
1191 checksum = hw->eeprom.ops.calc_checksum(hw); 1189 checksum = hw->eeprom.ops.calc_checksum(hw);
1192 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1190 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1193 checksum); 1191 checksum);
1194 } else { 1192 } else {
1195 hw_dbg(hw, "EEPROM read failed\n"); 1193 hw_dbg(hw, "EEPROM read failed\n");
1196 } 1194 }
@@ -1238,37 +1236,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1238 u32 rar_low, rar_high; 1236 u32 rar_low, rar_high;
1239 u32 rar_entries = hw->mac.num_rar_entries; 1237 u32 rar_entries = hw->mac.num_rar_entries;
1240 1238
1239 /* Make sure we are using a valid rar index range */
1240 if (index >= rar_entries) {
1241 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1242 return IXGBE_ERR_INVALID_ARGUMENT;
1243 }
1244
1241 /* setup VMDq pool selection before this RAR gets enabled */ 1245 /* setup VMDq pool selection before this RAR gets enabled */
1242 hw->mac.ops.set_vmdq(hw, index, vmdq); 1246 hw->mac.ops.set_vmdq(hw, index, vmdq);
1243 1247
1244 /* Make sure we are using a valid rar index range */ 1248 /*
1245 if (index < rar_entries) { 1249 * HW expects these in little endian so we reverse the byte
1246 /* 1250 * order from network order (big endian) to little endian
1247 * HW expects these in little endian so we reverse the byte 1251 */
1248 * order from network order (big endian) to little endian 1252 rar_low = ((u32)addr[0] |
1249 */ 1253 ((u32)addr[1] << 8) |
1250 rar_low = ((u32)addr[0] | 1254 ((u32)addr[2] << 16) |
1251 ((u32)addr[1] << 8) | 1255 ((u32)addr[3] << 24));
1252 ((u32)addr[2] << 16) | 1256 /*
1253 ((u32)addr[3] << 24)); 1257 * Some parts put the VMDq setting in the extra RAH bits,
1254 /* 1258 * so save everything except the lower 16 bits that hold part
1255 * Some parts put the VMDq setting in the extra RAH bits, 1259 * of the address and the address valid bit.
1256 * so save everything except the lower 16 bits that hold part 1260 */
1257 * of the address and the address valid bit. 1261 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1258 */ 1262 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1259 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1263 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1260 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1261 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1262 1264
1263 if (enable_addr != 0) 1265 if (enable_addr != 0)
1264 rar_high |= IXGBE_RAH_AV; 1266 rar_high |= IXGBE_RAH_AV;
1265 1267
1266 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1268 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1267 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1269 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1268 } else {
1269 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1270 return IXGBE_ERR_RAR_INDEX;
1271 }
1272 1270
1273 return 0; 1271 return 0;
1274} 1272}
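
The rewritten ixgbe_set_rar_generic() validates the index up front and then packs the address exactly as the comment describes: bytes 0-3 little endian into RAL, bytes 4-5 plus the Address Valid bit into RAH. A standalone worked example for 00:11:22:33:44:55 (IXGBE_RAH_AV taken as 0x80000000, the value assumed from the ixgbe hardware header):

#include <stdio.h>

#define IXGBE_RAH_AV 0x80000000u

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int rar_low, rar_high;

	rar_low  = (unsigned int)addr[0] |
		   ((unsigned int)addr[1] << 8) |
		   ((unsigned int)addr[2] << 16) |
		   ((unsigned int)addr[3] << 24);
	rar_high = ((unsigned int)addr[4] | ((unsigned int)addr[5] << 8)) | IXGBE_RAH_AV;

	printf("RAL = 0x%08X  RAH = 0x%08X\n", rar_low, rar_high);
	/* prints RAL = 0x33221100  RAH = 0x80005544 */
	return 0;
}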
@@ -1286,58 +1284,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1286 u32 rar_entries = hw->mac.num_rar_entries; 1284 u32 rar_entries = hw->mac.num_rar_entries;
1287 1285
1288 /* Make sure we are using a valid rar index range */ 1286 /* Make sure we are using a valid rar index range */
-	if (index < rar_entries) {
-		/*
-		 * Some parts put the VMDq setting in the extra RAH bits,
-		 * so save everything except the lower 16 bits that hold part
-		 * of the address and the address valid bit.
-		 */
-		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-		rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
-
-		IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-	} else {
+	if (index >= rar_entries) {
 		hw_dbg(hw, "RAR index %d is out of range.\n", index);
-		return IXGBE_ERR_RAR_INDEX;
+		return IXGBE_ERR_INVALID_ARGUMENT;
 	}
 
-	/* clear VMDq pool/queue selection for this RAR */
-	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
-
-	return 0;
-}
-
-/**
- * ixgbe_enable_rar - Enable Rx address register
- * @hw: pointer to hardware structure
- * @index: index into the RAR table
- *
- * Enables the select receive address register.
- **/
-static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
-{
-	u32 rar_high;
-
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
 	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-	rar_high |= IXGBE_RAH_AV;
+	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
 	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
-}
 
-/**
- * ixgbe_disable_rar - Disable Rx address register
- * @hw: pointer to hardware structure
- * @index: index into the RAR table
- *
- * Disables the select receive address register.
- **/
-static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
-{
-	u32 rar_high;
+	/* clear VMDq pool/queue selection for this RAR */
+	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
 
-	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
-	rar_high &= (~IXGBE_RAH_AV);
-	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+	return 0;
 }
1342 1308
1343/** 1309/**
@@ -1386,7 +1352,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1386 } 1352 }
1387 1353
1388 /* Clear the MTA */ 1354 /* Clear the MTA */
1389 hw->addr_ctrl.mc_addr_in_rar_count = 0;
1390 hw->addr_ctrl.mta_in_use = 0; 1355 hw->addr_ctrl.mta_in_use = 0;
1391 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1356 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1392 1357
@@ -1401,105 +1366,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1401} 1366}
1402 1367
1403/** 1368/**
1404 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1405 * @hw: pointer to hardware structure
1406 * @addr: new address
1407 *
1408 * Adds it to unused receive address register or goes into promiscuous mode.
1409 **/
1410static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1411{
1412 u32 rar_entries = hw->mac.num_rar_entries;
1413 u32 rar;
1414
1415 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1416 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1417
1418 /*
1419 * Place this address in the RAR if there is room,
1420 * else put the controller into promiscuous mode
1421 */
1422 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1423 rar = hw->addr_ctrl.rar_used_count -
1424 hw->addr_ctrl.mc_addr_in_rar_count;
1425 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1426 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1427 hw->addr_ctrl.rar_used_count++;
1428 } else {
1429 hw->addr_ctrl.overflow_promisc++;
1430 }
1431
1432 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1433}
1434
1435/**
1436 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1437 * @hw: pointer to hardware structure
1438 * @netdev: pointer to net device structure
1439 *
1440 * The given list replaces any existing list. Clears the secondary addrs from
1441 * receive address registers. Uses unused receive address registers for the
1442 * first secondary addresses, and falls back to promiscuous mode as needed.
1443 *
1444 * Drivers using secondary unicast addresses must set user_set_promisc when
1445 * manually putting the device into promiscuous mode.
1446 **/
1447s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1448 struct net_device *netdev)
1449{
1450 u32 i;
1451 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1452 u32 uc_addr_in_use;
1453 u32 fctrl;
1454 struct netdev_hw_addr *ha;
1455
1456 /*
1457 * Clear accounting of old secondary address list,
1458 * don't count RAR[0]
1459 */
1460 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
1461 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1462 hw->addr_ctrl.overflow_promisc = 0;
1463
1464 /* Zero out the other receive addresses */
1465 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1466 for (i = 0; i < uc_addr_in_use; i++) {
1467 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1468 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1469 }
1470
1471 /* Add the new addresses */
1472 netdev_for_each_uc_addr(ha, netdev) {
1473 hw_dbg(hw, " Adding the secondary addresses:\n");
1474 ixgbe_add_uc_addr(hw, ha->addr, 0);
1475 }
1476
1477 if (hw->addr_ctrl.overflow_promisc) {
1478 /* enable promisc if not already in overflow or set by user */
1479 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1480 hw_dbg(hw, " Entering address overflow promisc mode\n");
1481 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1482 fctrl |= IXGBE_FCTRL_UPE;
1483 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1484 hw->addr_ctrl.uc_set_promisc = true;
1485 }
1486 } else {
1487 /* only disable if set by overflow, not by user */
1488 if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
1489 !(hw->addr_ctrl.user_set_promisc)) {
1490 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1491 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1492 fctrl &= ~IXGBE_FCTRL_UPE;
1493 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1494 hw->addr_ctrl.uc_set_promisc = false;
1495 }
1496 }
1497
1498 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
1499 return 0;
1500}
1501
1502/**
1503 * ixgbe_mta_vector - Determines bit-vector in multicast table to set 1369 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
1504 * @hw: pointer to hardware structure 1370 * @hw: pointer to hardware structure
1505 * @mc_addr: the multicast address 1371 * @mc_addr: the multicast address
@@ -1550,7 +1416,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1550 u32 vector; 1416 u32 vector;
1551 u32 vector_bit; 1417 u32 vector_bit;
1552 u32 vector_reg; 1418 u32 vector_reg;
1553 u32 mta_reg;
1554 1419
1555 hw->addr_ctrl.mta_in_use++; 1420 hw->addr_ctrl.mta_in_use++;
1556 1421
@@ -1568,9 +1433,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1568 */ 1433 */
1569 vector_reg = (vector >> 5) & 0x7F; 1434 vector_reg = (vector >> 5) & 0x7F;
1570 vector_bit = vector & 0x1F; 1435 vector_bit = vector & 0x1F;
-	mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
-	mta_reg |= (1 << vector_bit);
-	IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
1574} 1437}
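
The hunk above replaces a per-address MMIO read-modify-write with a driver-side shadow update: the 12-bit hash from ixgbe_mta_vector is split into a table index (upper 7 bits) and a bit offset (lower 5 bits), and only the shadow copy is touched here; the whole table is then flushed to the MTA registers in one pass in a later hunk of this file. A minimal user-space sketch of that index/bit split, assuming a 128-entry by 32-bit shadow table and an arbitrary example hash value:

#include <stdio.h>
#include <stdint.h>

#define MTA_ENTRIES 128         /* assumed shadow size: 128 x 32 filter bits */

static uint32_t mta_shadow[MTA_ENTRIES];

/* Mirrors the vector_reg/vector_bit split used by ixgbe_set_mta(). */
static void set_mta_bit(uint16_t vector)
{
        uint32_t vector_reg = (vector >> 5) & 0x7F;     /* which 32-bit entry */
        uint32_t vector_bit = vector & 0x1F;            /* which bit inside it */

        mta_shadow[vector_reg] |= 1u << vector_bit;
}

int main(void)
{
        uint16_t vector = 0x7A3;        /* illustrative 12-bit hash value */
        unsigned reg = (vector >> 5) & 0x7F;
        unsigned bit = vector & 0x1F;

        set_mta_bit(vector);
        printf("vector 0x%03X -> mta_shadow[%u], bit %u, value 0x%08X\n",
               (unsigned)vector, reg, bit, (unsigned)mta_shadow[reg]);
        return 0;
}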
1575 1438
1576/** 1439/**
@@ -1596,18 +1459,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1596 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); 1459 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1597 hw->addr_ctrl.mta_in_use = 0; 1460 hw->addr_ctrl.mta_in_use = 0;
1598 1461
-	/* Clear the MTA */
+	/* Clear mta_shadow */
 	hw_dbg(hw, " Clearing MTA\n");
-	for (i = 0; i < hw->mac.mcft_size; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
 
-	/* Add the new addresses */
+	/* Update mta shadow */
 	netdev_for_each_mc_addr(ha, netdev) {
 		hw_dbg(hw, " Adding the multicast addresses:\n");
 		ixgbe_set_mta(hw, ha->addr);
 	}
1609 1471
1610 /* Enable mta */ 1472 /* Enable mta */
1473 for (i = 0; i < hw->mac.mcft_size; i++)
1474 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
1475 hw->mac.mta_shadow[i]);
1476
1611 if (hw->addr_ctrl.mta_in_use > 0) 1477 if (hw->addr_ctrl.mta_in_use > 0)
1612 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1478 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
1613 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1479 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
@@ -1624,15 +1490,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1624 **/ 1490 **/
1625s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1491s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1626{ 1492{
1627 u32 i;
1628 u32 rar_entries = hw->mac.num_rar_entries;
1629 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1493 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1630 1494
1631 if (a->mc_addr_in_rar_count > 0)
1632 for (i = (rar_entries - a->mc_addr_in_rar_count);
1633 i < rar_entries; i++)
1634 ixgbe_enable_rar(hw, i);
1635
1636 if (a->mta_in_use > 0) 1495 if (a->mta_in_use > 0)
1637 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 1496 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
1638 hw->mac.mc_filter_type); 1497 hw->mac.mc_filter_type);
@@ -1648,15 +1507,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
1648 **/ 1507 **/
1649s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1508s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1650{ 1509{
1651 u32 i;
1652 u32 rar_entries = hw->mac.num_rar_entries;
1653 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1510 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
1654 1511
1655 if (a->mc_addr_in_rar_count > 0)
1656 for (i = (rar_entries - a->mc_addr_in_rar_count);
1657 i < rar_entries; i++)
1658 ixgbe_disable_rar(hw, i);
1659
1660 if (a->mta_in_use > 0) 1512 if (a->mta_in_use > 0)
1661 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1513 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1662 1514
@@ -1703,7 +1555,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1703 * 2: Tx flow control is enabled (we can send pause frames but 1555 * 2: Tx flow control is enabled (we can send pause frames but
1704 * we do not support receiving pause frames). 1556 * we do not support receiving pause frames).
1705 * 3: Both Rx and Tx flow control (symmetric) are enabled. 1557 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1558#ifdef CONFIG_DCB
1706 * 4: Priority Flow Control is enabled. 1559 * 4: Priority Flow Control is enabled.
1560#endif
1707 * other: Invalid. 1561 * other: Invalid.
1708 */ 1562 */
1709 switch (hw->fc.current_mode) { 1563 switch (hw->fc.current_mode) {
@@ -2159,10 +2013,16 @@ out:
2159 **/ 2013 **/
2160s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2014s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2161{ 2015{
2016 struct ixgbe_adapter *adapter = hw->back;
2162 u32 i; 2017 u32 i;
2163 u32 reg_val; 2018 u32 reg_val;
2164 u32 number_of_queues; 2019 u32 number_of_queues;
-	s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+	s32 status = 0;
2021 u16 dev_status = 0;
2022
2023 /* Just jump out if bus mastering is already disabled */
2024 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2025 goto out;
2166 2026
2167 /* Disable the receive unit by stopping each queue */ 2027 /* Disable the receive unit by stopping each queue */
2168 number_of_queues = hw->mac.max_rx_queues; 2028 number_of_queues = hw->mac.max_rx_queues;
@@ -2179,13 +2039,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2179 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 2039 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
2180 2040
2181 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2041 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
-		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
-			status = 0;
+		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+			goto check_device_status;
+		udelay(100);
+	}
+
+	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+	/*
+	 * Before proceeding, make sure that the PCIe block does not have
+	 * transactions pending.
+	 */
+check_device_status:
+	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+		pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
+				     &dev_status);
+		if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
 			break;
-		}
 		udelay(100);
 	}
2188 2062
2063 if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
2064 hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2065 else
2066 goto out;
2067
2068 /*
2069 * Two consecutive resets are required via CTRL.RST per datasheet
2070 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
2071 * of this need. The first reset prevents new master requests from
2072 * being issued by our device. We then must wait 1usec for any
2073 * remaining completions from the PCIe bus to trickle in, and then reset
2074 * again to clear out any effects they may have had on our device.
2075 */
2076 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2077
2078out:
2189 return status; 2079 return status;
2190} 2080}
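
The rewritten ixgbe_disable_pcie_master above is a two-stage wait: first poll the GIO master-disable status bit, then, whether or not that cleared, poll the PCIe Device Status register for pending transactions, and only flag the double CTRL.RST requirement when transactions are still outstanding after the second poll. The stand-alone sketch below restates that control flow; the stub reads, the timeout count and the error value are illustrative assumptions, not values taken from the driver.

#include <stdbool.h>
#include <stdio.h>

#define MASTER_DISABLE_TIMEOUT 800          /* assumed poll count, 100us steps */
#define ERR_MASTER_REQUESTS_PENDING (-42)   /* illustrative error code */

/* Stand-ins for the GIO status bit and the PCIe Device Status register. */
static bool gio_master_busy(void)          { return false; }
static bool pcie_transaction_pending(void) { return false; }
static void udelay_100(void)               { /* busy-wait elided */ }

static int disable_pcie_master_sketch(bool *need_double_reset)
{
        int status = 0;
        int i;

        for (i = 0; i < MASTER_DISABLE_TIMEOUT; i++) {
                if (!gio_master_busy())
                        goto check_device_status;
                udelay_100();
        }

        /* GIO never cleared: remember the error, but still check the bus. */
        status = ERR_MASTER_REQUESTS_PENDING;

check_device_status:
        for (i = 0; i < MASTER_DISABLE_TIMEOUT; i++) {
                if (!pcie_transaction_pending())
                        break;
                udelay_100();
        }

        /* Transactions still pending: the reset path must issue CTRL.RST twice. */
        if (i == MASTER_DISABLE_TIMEOUT)
                *need_double_reset = true;

        return status;
}

int main(void)
{
        bool double_reset = false;

        printf("status=%d double_reset=%d\n",
               disable_pcie_master_sketch(&double_reset), double_reset);
        return 0;
}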
2191 2081
@@ -2206,6 +2096,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2206 s32 timeout = 200; 2096 s32 timeout = 200;
2207 2097
2208 while (timeout) { 2098 while (timeout) {
2099 /*
2100 * SW EEPROM semaphore bit is used for access to all
2101 * SW_FW_SYNC/GSSR bits (not just EEPROM)
2102 */
2209 if (ixgbe_get_eeprom_semaphore(hw)) 2103 if (ixgbe_get_eeprom_semaphore(hw))
2210 return IXGBE_ERR_SWFW_SYNC; 2104 return IXGBE_ERR_SWFW_SYNC;
2211 2105
@@ -2223,7 +2117,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2223 } 2117 }
2224 2118
2225 if (!timeout) { 2119 if (!timeout) {
-		hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
+		hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
2227 return IXGBE_ERR_SWFW_SYNC; 2121 return IXGBE_ERR_SWFW_SYNC;
2228 } 2122 }
2229 2123
@@ -2427,37 +2321,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2427 u32 mpsar_lo, mpsar_hi; 2321 u32 mpsar_lo, mpsar_hi;
2428 u32 rar_entries = hw->mac.num_rar_entries; 2322 u32 rar_entries = hw->mac.num_rar_entries;
2429 2323
-	if (rar < rar_entries) {
-		mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-		mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
 
-		if (!mpsar_lo && !mpsar_hi)
-			goto done;
+	mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+	mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
-		if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
-			if (mpsar_lo) {
-				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
-				mpsar_lo = 0;
-			}
-			if (mpsar_hi) {
-				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
-				mpsar_hi = 0;
-			}
-		} else if (vmdq < 32) {
-			mpsar_lo &= ~(1 << vmdq);
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
-		} else {
-			mpsar_hi &= ~(1 << (vmdq - 32));
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
-		}
+	if (!mpsar_lo && !mpsar_hi)
+		goto done;
 
-		/* was that the last pool using this rar? */
-		if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
-			hw->mac.ops.clear_rar(hw, rar);
+	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+		if (mpsar_lo) {
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+			mpsar_lo = 0;
+		}
+		if (mpsar_hi) {
+			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+			mpsar_hi = 0;
+		}
+	} else if (vmdq < 32) {
+		mpsar_lo &= ~(1 << vmdq);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
 	} else {
-		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		mpsar_hi &= ~(1 << (vmdq - 32));
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
 	}
 
+	/* was that the last pool using this rar? */
+	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+		hw->mac.ops.clear_rar(hw, rar);
2461done: 2356done:
2462 return 0; 2357 return 0;
2463} 2358}
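
For context, each receive-address register owns a 64-pool VMDq membership bitmap split across MPSAR_LO (pools 0-31) and MPSAR_HI (pools 32-63); ixgbe_clear_vmdq_generic clears one bit per call and releases the RAR once both halves reach zero. A small stand-alone illustration of that bookkeeping, using an invented struct in place of the real registers:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Invented mirror of one RAR's pool bitmap: MPSAR_LO covers pools 0-31,
 * MPSAR_HI covers pools 32-63. */
struct rar_pools {
        uint32_t mpsar_lo;
        uint32_t mpsar_hi;
};

/* Returns true when the last pool was removed and the RAR can be freed. */
static bool clear_pool(struct rar_pools *rar, unsigned int vmdq)
{
        if (vmdq < 32)
                rar->mpsar_lo &= ~(1u << vmdq);
        else
                rar->mpsar_hi &= ~(1u << (vmdq - 32));

        return rar->mpsar_lo == 0 && rar->mpsar_hi == 0;
}

int main(void)
{
        struct rar_pools rar = { .mpsar_lo = 1u << 5, .mpsar_hi = 1u << (40 - 32) };

        printf("pool 5 removed, rar empty: %d\n", clear_pool(&rar, 5));
        printf("pool 40 removed, rar empty: %d\n", clear_pool(&rar, 40));
        return 0;
}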
@@ -2473,18 +2368,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2473 u32 mpsar; 2368 u32 mpsar;
2474 u32 rar_entries = hw->mac.num_rar_entries; 2369 u32 rar_entries = hw->mac.num_rar_entries;
2475 2370
-	if (rar < rar_entries) {
-		if (vmdq < 32) {
-			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
-			mpsar |= 1 << vmdq;
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
-		} else {
-			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
-			mpsar |= 1 << (vmdq - 32);
-			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
-		}
-	} else {
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
 		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	if (vmdq < 32) {
+		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+		mpsar |= 1 << vmdq;
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
+	} else {
+		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+		mpsar |= 1 << (vmdq - 32);
+		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
 	}
2489 return 0; 2386 return 0;
2490} 2387}
@@ -2497,7 +2394,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2497{ 2394{
2498 int i; 2395 int i;
2499 2396
2500
2501 for (i = 0; i < 128; i++) 2397 for (i = 0; i < 128; i++)
2502 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 2398 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2503 2399
@@ -2726,12 +2622,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2726 * Reads the links register to determine if link is up and the current speed 2622 * Reads the links register to determine if link is up and the current speed
2727 **/ 2623 **/
2728s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 2624s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2729 bool *link_up, bool link_up_wait_to_complete) 2625 bool *link_up, bool link_up_wait_to_complete)
2730{ 2626{
-	u32 links_reg;
+	u32 links_reg, links_orig;
2732 u32 i; 2628 u32 i;
2733 2629
2630 /* clear the old state */
2631 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
2632
2734 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 2633 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2634
2635 if (links_orig != links_reg) {
2636 hw_dbg(hw, "LINKS changed from %08X to %08X\n",
2637 links_orig, links_reg);
2638 }
2639
2735 if (link_up_wait_to_complete) { 2640 if (link_up_wait_to_complete) {
2736 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 2641 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2737 if (links_reg & IXGBE_LINKS_UP) { 2642 if (links_reg & IXGBE_LINKS_UP) {
@@ -2754,10 +2659,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2754 IXGBE_LINKS_SPEED_10G_82599) 2659 IXGBE_LINKS_SPEED_10G_82599)
2755 *speed = IXGBE_LINK_SPEED_10GB_FULL; 2660 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2756 else if ((links_reg & IXGBE_LINKS_SPEED_82599) == 2661 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2757 IXGBE_LINKS_SPEED_1G_82599) 2662 IXGBE_LINKS_SPEED_1G_82599)
2758 *speed = IXGBE_LINK_SPEED_1GB_FULL; 2663 *speed = IXGBE_LINK_SPEED_1GB_FULL;
-	else
+	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+		 IXGBE_LINKS_SPEED_100_82599)
 		*speed = IXGBE_LINK_SPEED_100_FULL;
+	else
+		*speed = IXGBE_LINK_SPEED_UNKNOWN;
2761 2669
2762 /* if link is down, zero out the current_mode */ 2670 /* if link is down, zero out the current_mode */
2763 if (*link_up == false) { 2671 if (*link_up == false) {
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 66ed045a8cf..508f635fc2c 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
29#define _IXGBE_COMMON_H_ 29#define _IXGBE_COMMON_H_
30 30
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#include "ixgbe.h"
32 33
33u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); 34u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
34s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -62,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
62s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 63s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
63s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, 64s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
64 struct net_device *netdev); 65 struct net_device *netdev);
65s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
66 struct net_device *netdev);
67s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 66s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
68s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 67s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
69s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 68s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
@@ -110,9 +109,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
110 109
111#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) 110#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
112 111
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-	netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+	netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
116#define e_dev_info(format, arg...) \ 114#define e_dev_info(format, arg...) \
117 dev_info(&adapter->pdev->dev, format, ## arg) 115 dev_info(&adapter->pdev->dev, format, ## arg)
118#define e_dev_warn(format, arg...) \ 116#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index d16c260c1f5..c2ee6fcb4e9 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,42 @@
34#include "ixgbe_dcb_82599.h" 34#include "ixgbe_dcb_82599.h"
35 35
36/** 36/**
37 * ixgbe_ieee_credits - Calculate the IEEE traffic class
38 * credits from the configured bandwidth percentages. Credits
39 * are the smallest unit programmable into the underlying
40 * hardware. The IEEE 802.1Qaz specification does not use bandwidth
41 * groups, so this is much simpler than the CEE case.
42 */
43s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
44{
45 int min_percent = 100;
46 int min_credit, multiplier;
47 int i;
48
49 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
50 DCB_CREDIT_QUANTUM;
51
52 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
53 if (bw[i] < min_percent && bw[i])
54 min_percent = bw[i];
55 }
56
57 multiplier = (min_credit / min_percent) + 1;
58
59 /* Find out the hw credits for each TC */
60 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
61 int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
62
63 if (val < min_credit)
64 val = min_credit;
65 refill[i] = val;
66
67 max[i] = (bw[i] * MAX_CREDIT)/100;
68 }
69 return 0;
70}
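
A worked example may make the arithmetic above concrete. Assume bw = {50, 30, 20, 0, ...} percent and max_frame = 1522, and take DCB_CREDIT_QUANTUM = 64, MAX_CREDIT_REFILL = 511 and MAX_CREDIT = 4095 as illustrative values for the constants defined elsewhere in ixgbe_dcb.h: min_credit = (761 + 63) / 64 = 12, min_percent = 20 and multiplier = 12 / 20 + 1 = 1, so refill becomes {50, 30, 20, 12, ...} and max becomes {2047, 1228, 819, 0, ...}. The user-space re-implementation below reproduces the same steps:

#include <stdio.h>

#define MAX_TRAFFIC_CLASS  8
#define DCB_CREDIT_QUANTUM 64    /* illustrative; see ixgbe_dcb.h for the real value */
#define MAX_CREDIT_REFILL  511   /* illustrative register limits */
#define MAX_CREDIT         4095

static int min_int(int a, int b) { return a < b ? a : b; }

/* User-space copy of the ixgbe_ieee_credits() arithmetic. */
static void ieee_credits(const unsigned char *bw, unsigned short *refill,
                         unsigned short *max, int max_frame)
{
        int min_percent = 100;
        int min_credit, multiplier, i;

        min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / DCB_CREDIT_QUANTUM;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                if (bw[i] && bw[i] < min_percent)
                        min_percent = bw[i];

        multiplier = (min_credit / min_percent) + 1;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                int val = min_int(bw[i] * multiplier, MAX_CREDIT_REFILL);

                if (val < min_credit)
                        val = min_credit;
                refill[i] = val;
                max[i] = (bw[i] * MAX_CREDIT) / 100;
        }
}

int main(void)
{
        unsigned char bw[MAX_TRAFFIC_CLASS] = { 50, 30, 20, 0, 0, 0, 0, 0 };
        unsigned short refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
        int i;

        ieee_credits(bw, refill, max, 1522);
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
                printf("tc%d: bw=%d%% refill=%d max=%d\n", i, bw[i], refill[i], max[i]);
        return 0;
}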
71
72/**
37 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits 73 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
38 * @ixgbe_dcb_config: Struct containing DCB settings. 74 * @ixgbe_dcb_config: Struct containing DCB settings.
39 * @direction: Configuring either Tx or Rx. 75 * @direction: Configuring either Tx or Rx.
@@ -141,6 +177,59 @@ out:
141 return ret_val; 177 return ret_val;
142} 178}
143 179
180void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
181{
182 int i;
183
184 *pfc_en = 0;
185 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
186 *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
187}
188
189void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
190 u16 *refill)
191{
192 struct tc_bw_alloc *p;
193 int i;
194
195 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
196 p = &cfg->tc_config[i].path[direction];
197 refill[i] = p->data_credits_refill;
198 }
199}
200
201void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
202{
203 int i;
204
205 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
206 max[i] = cfg->tc_config[i].desc_credits_max;
207}
208
209void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
210 u8 *bwgid)
211{
212 struct tc_bw_alloc *p;
213 int i;
214
215 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
216 p = &cfg->tc_config[i].path[direction];
217 bwgid[i] = p->bwg_id;
218 }
219}
220
221void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
222 u8 *ptype)
223{
224 struct tc_bw_alloc *p;
225 int i;
226
227 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
228 p = &cfg->tc_config[i].path[direction];
229 ptype[i] = p->prio_type;
230 }
231}
232
144/** 233/**
145 * ixgbe_dcb_hw_config - Config and enable DCB 234 * ixgbe_dcb_hw_config - Config and enable DCB
146 * @hw: pointer to hardware structure 235 * @hw: pointer to hardware structure
@@ -152,13 +241,30 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
152 struct ixgbe_dcb_config *dcb_config) 241 struct ixgbe_dcb_config *dcb_config)
153{ 242{
154 s32 ret = 0; 243 s32 ret = 0;
244 u8 pfc_en;
245 u8 ptype[MAX_TRAFFIC_CLASS];
246 u8 bwgid[MAX_TRAFFIC_CLASS];
247 u16 refill[MAX_TRAFFIC_CLASS];
248 u16 max[MAX_TRAFFIC_CLASS];
249
250 /* Unpack CEE standard containers */
251 ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
252 ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
253 ixgbe_dcb_unpack_max(dcb_config, max);
254 ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
255 ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
256
155 switch (hw->mac.type) { 257 switch (hw->mac.type) {
156 case ixgbe_mac_82598EB: 258 case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+						pfc_en, refill, max, bwgid,
+						ptype);
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
-		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+						pfc_en, refill, max, bwgid,
+						ptype);
162 break; 268 break;
163 default: 269 default:
164 break; 270 break;
@@ -166,3 +272,70 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
166 return ret; 272 return ret;
167} 273}
168 274
275/* Helper routines to abstract HW specifics from DCB netlink ops */
276s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
277{
278 int ret = -EINVAL;
279
280 switch (hw->mac.type) {
281 case ixgbe_mac_82598EB:
282 ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
283 break;
284 case ixgbe_mac_82599EB:
285 case ixgbe_mac_X540:
286 ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
287 break;
288 default:
289 break;
290 }
291 return ret;
292}
293
294s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
295 u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa)
296{
297 int i;
298 u8 prio_type[IEEE_8021QAZ_MAX_TCS];
299
300 /* Map TSA onto CEE prio type */
301 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
302 switch (tsa[i]) {
303 case IEEE_8021QAZ_TSA_STRICT:
304 prio_type[i] = 2;
305 break;
306 case IEEE_8021QAZ_TSA_ETS:
307 prio_type[i] = 0;
308 break;
309 default:
310 /* Hardware only supports the strict priority and
311 * ETS transmission selection algorithms; if we
312 * receive any other value from dcbnl, throw
313 * an error.
314 */
315 return -EINVAL;
316 }
317 }
318
319 switch (hw->mac.type) {
320 case ixgbe_mac_82598EB:
321 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
322 prio_type);
323 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
324 bwg_id, prio_type);
325 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
326 bwg_id, prio_type);
327 break;
328 case ixgbe_mac_82599EB:
329 case ixgbe_mac_X540:
330 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
331 bwg_id, prio_type);
332 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
333 bwg_id, prio_type);
334 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
335 bwg_id, prio_type);
336 break;
337 default:
338 break;
339 }
340 return 0;
341}
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 1cfe38ee164..515bc27477f 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; 139 struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ 140 u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
141 bool pfc_mode_enable; 141 bool pfc_mode_enable;
142 bool round_robin_enable;
143 142
144 enum dcb_rx_pba_cfg rx_pba_cfg; 143 enum dcb_rx_pba_cfg rx_pba_cfg;
145 144
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
148}; 147};
149 148
150/* DCB driver APIs */ 149/* DCB driver APIs */
150void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
151void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
152void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
153void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
154void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
151 155
152/* DCB credits calculation */ 156/* DCB credits calculation */
157s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, 158s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
154 struct ixgbe_dcb_config *, int, u8); 159 struct ixgbe_dcb_config *, int, u8);
155 160
156/* DCB hw initialization */ 161/* DCB hw initialization */
162s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
163 u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type);
164s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
157s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 165s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
158 166
159/* DCB definitions for credit calculation */ 167/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 9a5e89c12e0..c97cf9160dc 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -38,15 +38,14 @@
38 * 38 *
39 * Configure packet buffers for DCB mode. 39 * Configure packet buffers for DCB mode.
40 */ 40 */
41static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, 41static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
42 struct ixgbe_dcb_config *dcb_config)
43{ 42{
44 s32 ret_val = 0; 43 s32 ret_val = 0;
45 u32 value = IXGBE_RXPBSIZE_64KB; 44 u32 value = IXGBE_RXPBSIZE_64KB;
46 u8 i = 0; 45 u8 i = 0;
47 46
48 /* Setup Rx packet buffer sizes */ 47 /* Setup Rx packet buffer sizes */
49 switch (dcb_config->rx_pba_cfg) { 48 switch (rx_pba) {
50 case pba_80_48: 49 case pba_80_48:
51 /* Setup the first four at 80KB */ 50 /* Setup the first four at 80KB */
52 value = IXGBE_RXPBSIZE_80KB; 51 value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
78 * 77 *
79 * Configure Rx Data Arbiter and credits for each traffic class. 78 * Configure Rx Data Arbiter and credits for each traffic class.
80 */ 79 */
81static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, 80s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
82 struct ixgbe_dcb_config *dcb_config) 81 u16 *refill,
82 u16 *max,
83 u8 *prio_type)
83{ 84{
84 struct tc_bw_alloc *p;
85 u32 reg = 0; 85 u32 reg = 0;
86 u32 credit_refill = 0; 86 u32 credit_refill = 0;
87 u32 credit_max = 0; 87 u32 credit_max = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
102 102
103 /* Configure traffic class credits and priority */ 103 /* Configure traffic class credits and priority */
104 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 104 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
105 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 105 credit_refill = refill[i];
106 credit_refill = p->data_credits_refill; 106 credit_max = max[i];
107 credit_max = p->data_credits_max;
108 107
109 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); 108 reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
110 109
111 if (p->prio_type == prio_link) 110 if (prio_type[i] == prio_link)
112 reg |= IXGBE_RT2CR_LSP; 111 reg |= IXGBE_RT2CR_LSP;
113 112
114 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); 113 IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
135 * 134 *
136 * Configure Tx Descriptor Arbiter and credits for each traffic class. 135 * Configure Tx Descriptor Arbiter and credits for each traffic class.
137 */ 136 */
138static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, 137s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
139 struct ixgbe_dcb_config *dcb_config) 138 u16 *refill,
139 u16 *max,
140 u8 *bwg_id,
141 u8 *prio_type)
140{ 142{
141 struct tc_bw_alloc *p;
142 u32 reg, max_credits; 143 u32 reg, max_credits;
143 u8 i; 144 u8 i;
144 145
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
146 147
147 /* Enable arbiter */ 148 /* Enable arbiter */
148 reg &= ~IXGBE_DPMCS_ARBDIS; 149 reg &= ~IXGBE_DPMCS_ARBDIS;
149 if (!(dcb_config->round_robin_enable)) { 150 /* Enable DFP and Recycle mode */
150 /* Enable DFP and Recycle mode */ 151 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
151 reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
152 }
153 reg |= IXGBE_DPMCS_TSOEF; 152 reg |= IXGBE_DPMCS_TSOEF;
154 /* Configure Max TSO packet size 34KB including payload and headers */ 153 /* Configure Max TSO packet size 34KB including payload and headers */
155 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); 154 reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
158 157
159 /* Configure traffic class credits and priority */ 158 /* Configure traffic class credits and priority */
160 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 159 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
161 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 160 max_credits = max[i];
162 max_credits = dcb_config->tc_config[i].desc_credits_max;
163 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; 161 reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
164 reg |= p->data_credits_refill; 162 reg |= refill[i];
165 reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT; 163 reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
166 164
167 if (p->prio_type == prio_group) 165 if (prio_type[i] == prio_group)
168 reg |= IXGBE_TDTQ2TCCR_GSP; 166 reg |= IXGBE_TDTQ2TCCR_GSP;
169 167
170 if (p->prio_type == prio_link) 168 if (prio_type[i] == prio_link)
171 reg |= IXGBE_TDTQ2TCCR_LSP; 169 reg |= IXGBE_TDTQ2TCCR_LSP;
172 170
173 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); 171 IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
183 * 181 *
184 * Configure Tx Data Arbiter and credits for each traffic class. 182 * Configure Tx Data Arbiter and credits for each traffic class.
185 */ 183 */
186static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, 184s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
187 struct ixgbe_dcb_config *dcb_config) 185 u16 *refill,
186 u16 *max,
187 u8 *bwg_id,
188 u8 *prio_type)
188{ 189{
189 struct tc_bw_alloc *p;
190 u32 reg; 190 u32 reg;
191 u8 i; 191 u8 i;
192 192
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
200 200
201 /* Configure traffic class credits and priority */ 201 /* Configure traffic class credits and priority */
202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 202 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
203 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 203 reg = refill[i];
204 reg = p->data_credits_refill; 204 reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
205 reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT; 205 reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
206 reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
207 206
208 if (p->prio_type == prio_group) 207 if (prio_type[i] == prio_group)
209 reg |= IXGBE_TDPT2TCCR_GSP; 208 reg |= IXGBE_TDPT2TCCR_GSP;
210 209
211 if (p->prio_type == prio_link) 210 if (prio_type[i] == prio_link)
212 reg |= IXGBE_TDPT2TCCR_LSP; 211 reg |= IXGBE_TDPT2TCCR_LSP;
213 212
214 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); 213 IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,13 +228,12 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
229 * 228 *
230 * Configure Priority Flow Control for each traffic class. 229 * Configure Priority Flow Control for each traffic class.
231 */ 230 */
232s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, 231s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
233 struct ixgbe_dcb_config *dcb_config)
234{ 232{
235 u32 reg, rx_pba_size; 233 u32 reg, rx_pba_size;
236 u8 i; 234 u8 i;
237 235
238 if (!dcb_config->pfc_mode_enable) 236 if (!pfc_en)
239 goto out; 237 goto out;
240 238
241 /* Enable Transmit Priority Flow Control */ 239 /* Enable Transmit Priority Flow Control */
@@ -256,19 +254,20 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
256 * for each traffic class. 254 * for each traffic class.
257 */ 255 */
258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 256 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
257 int enabled = pfc_en & (1 << i);
259 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 258 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
260 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 259 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
261 reg = (rx_pba_size - hw->fc.low_water) << 10; 260 reg = (rx_pba_size - hw->fc.low_water) << 10;
262 261
263 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 262 if (enabled == pfc_enabled_tx ||
264 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 263 enabled == pfc_enabled_full)
265 reg |= IXGBE_FCRTL_XONE; 264 reg |= IXGBE_FCRTL_XONE;
266 265
267 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 266 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
268 267
269 reg = (rx_pba_size - hw->fc.high_water) << 10; 268 reg = (rx_pba_size - hw->fc.high_water) << 10;
270 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 269 if (enabled == pfc_enabled_tx ||
271 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 270 enabled == pfc_enabled_full)
272 reg |= IXGBE_FCRTH_FCEN; 271 reg |= IXGBE_FCRTH_FCEN;
273 272
274 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); 273 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
@@ -292,7 +291,7 @@ out:
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
294 */ 293 */
295static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) 294s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
296{ 295{
297 u32 reg = 0; 296 u32 reg = 0;
298 u8 i = 0; 297 u8 i = 0;
@@ -325,13 +324,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
325 * Configure dcb settings and enable dcb mode. 324 * Configure dcb settings and enable dcb mode.
326 */ 325 */
327s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, 326s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
328 struct ixgbe_dcb_config *dcb_config) 327 u8 rx_pba, u8 pfc_en, u16 *refill,
328 u16 *max, u8 *bwg_id, u8 *prio_type)
329{ 329{
330 ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); 330 ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
331 ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); 331 ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
332 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); 332 ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
333 ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); 333 bwg_id, prio_type);
334 ixgbe_dcb_config_pfc_82598(hw, dcb_config); 334 ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
335 bwg_id, prio_type);
336 ixgbe_dcb_config_pfc_82598(hw, pfc_en);
335 ixgbe_dcb_config_tc_stats_82598(hw); 337 ixgbe_dcb_config_tc_stats_82598(hw);
336 338
337 return 0; 339 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03ccfa08..1e9750c2b46 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -71,9 +71,28 @@
71/* DCB hardware-specific driver APIs */ 71/* DCB hardware-specific driver APIs */
72 72
73/* DCB PFC functions */ 73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
75 75
76/* DCB hw initialization */ 76/* DCB hw initialization */
77s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 77s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
78 u16 *refill,
79 u16 *max,
80 u8 *prio_type);
81
82s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
83 u16 *refill,
84 u16 *max,
85 u8 *bwg_id,
86 u8 *prio_type);
87
88s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
89 u16 *refill,
90 u16 *max,
91 u8 *bwg_id,
92 u8 *prio_type);
93
94s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
95 u8 rx_pba, u8 pfc_en, u16 *refill,
96 u16 *max, u8 *bwg_id, u8 *prio_type);
78 97
79#endif /* _DCB_82598_CONFIG_H */ 98#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 374e1f74d0f..beaa1c1c1e6 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -33,19 +33,18 @@
33/** 33/**
34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers 34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
35 * @hw: pointer to hardware structure 35 * @hw: pointer to hardware structure
36 * @dcb_config: pointer to ixgbe_dcb_config structure 36 * @rx_pba: method to distribute packet buffer
37 * 37 *
38 * Configure packet buffers for DCB mode. 38 * Configure packet buffers for DCB mode.
39 */ 39 */
40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, 40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
41 struct ixgbe_dcb_config *dcb_config)
42{ 41{
43 s32 ret_val = 0; 42 s32 ret_val = 0;
44 u32 value = IXGBE_RXPBSIZE_64KB; 43 u32 value = IXGBE_RXPBSIZE_64KB;
45 u8 i = 0; 44 u8 i = 0;
46 45
47 /* Setup Rx packet buffer sizes */ 46 /* Setup Rx packet buffer sizes */
48 switch (dcb_config->rx_pba_cfg) { 47 switch (rx_pba) {
49 case pba_80_48: 48 case pba_80_48:
50 /* Setup the first four at 80KB */ 49 /* Setup the first four at 80KB */
51 value = IXGBE_RXPBSIZE_80KB; 50 value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,19 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
75/** 74/**
76 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter 75 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
77 * @hw: pointer to hardware structure 76 * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
79 * 81 *
80 * Configure Rx Packet Arbiter and credits for each traffic class. 82 * Configure Rx Packet Arbiter and credits for each traffic class.
81 */ 83 */
82static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, 84s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
83 struct ixgbe_dcb_config *dcb_config) 85 u16 *refill,
86 u16 *max,
87 u8 *bwg_id,
88 u8 *prio_type)
84{ 89{
85 struct tc_bw_alloc *p;
86 u32 reg = 0; 90 u32 reg = 0;
87 u32 credit_refill = 0; 91 u32 credit_refill = 0;
88 u32 credit_max = 0; 92 u32 credit_max = 0;
@@ -103,15 +107,13 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
103 107
104 /* Configure traffic class credits and priority */ 108 /* Configure traffic class credits and priority */
105 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 109 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
106 p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; 110 credit_refill = refill[i];
107 111 credit_max = max[i];
108 credit_refill = p->data_credits_refill;
109 credit_max = p->data_credits_max;
110 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); 112 reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
111 113
112 reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT; 114 reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
113 115
114 if (p->prio_type == prio_link) 116 if (prio_type[i] == prio_link)
115 reg |= IXGBE_RTRPT4C_LSP; 117 reg |= IXGBE_RTRPT4C_LSP;
116 118
117 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); 119 IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +132,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
130/** 132/**
131 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter 133 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
132 * @hw: pointer to hardware structure 134 * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
134 * 139 *
135 * Configure Tx Descriptor Arbiter and credits for each traffic class. 140 * Configure Tx Descriptor Arbiter and credits for each traffic class.
136 */ 141 */
137static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, 142s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
138 struct ixgbe_dcb_config *dcb_config) 143 u16 *refill,
144 u16 *max,
145 u8 *bwg_id,
146 u8 *prio_type)
139{ 147{
140 struct tc_bw_alloc *p;
141 u32 reg, max_credits; 148 u32 reg, max_credits;
142 u8 i; 149 u8 i;
143 150
@@ -149,16 +156,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
149 156
150 /* Configure traffic class credits and priority */ 157 /* Configure traffic class credits and priority */
151 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 158 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
152 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 159 max_credits = max[i];
153 max_credits = dcb_config->tc_config[i].desc_credits_max;
154 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; 160 reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
155 reg |= p->data_credits_refill; 161 reg |= refill[i];
156 reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT; 162 reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
157 163
158 if (p->prio_type == prio_group) 164 if (prio_type[i] == prio_group)
159 reg |= IXGBE_RTTDT2C_GSP; 165 reg |= IXGBE_RTTDT2C_GSP;
160 166
161 if (p->prio_type == prio_link) 167 if (prio_type[i] == prio_link)
162 reg |= IXGBE_RTTDT2C_LSP; 168 reg |= IXGBE_RTTDT2C_LSP;
163 169
164 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); 170 IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +183,19 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
177/** 183/**
178 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter 184 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
179 * @hw: pointer to hardware structure 185 * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
181 * 190 *
182 * Configure Tx Packet Arbiter and credits for each traffic class. 191 * Configure Tx Packet Arbiter and credits for each traffic class.
183 */ 192 */
184static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, 193s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
185 struct ixgbe_dcb_config *dcb_config) 194 u16 *refill,
195 u16 *max,
196 u8 *bwg_id,
197 u8 *prio_type)
186{ 198{
187 struct tc_bw_alloc *p;
188 u32 reg; 199 u32 reg;
189 u8 i; 200 u8 i;
190 201
@@ -205,15 +216,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
205 216
206 /* Configure traffic class credits and priority */ 217 /* Configure traffic class credits and priority */
207 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 218 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
208 p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; 219 reg = refill[i];
209 reg = p->data_credits_refill; 220 reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
210 reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT; 221 reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
211 reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
212 222
213 if (p->prio_type == prio_group) 223 if (prio_type[i] == prio_group)
214 reg |= IXGBE_RTTPT2C_GSP; 224 reg |= IXGBE_RTTPT2C_GSP;
215 225
216 if (p->prio_type == prio_link) 226 if (prio_type[i] == prio_link)
217 reg |= IXGBE_RTTPT2C_LSP; 227 reg |= IXGBE_RTTPT2C_LSP;
218 228
219 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); 229 IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,17 +243,16 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
233/** 243/**
234 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control 244 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
235 * @hw: pointer to hardware structure 245 * @hw: pointer to hardware structure
236 * @dcb_config: pointer to ixgbe_dcb_config structure 246 * @pfc_en: enabled pfc bitmask
237 * 247 *
238 * Configure Priority Flow Control (PFC) for each traffic class. 248 * Configure Priority Flow Control (PFC) for each traffic class.
239 */ 249 */
240s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 250s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
241 struct ixgbe_dcb_config *dcb_config)
242{ 251{
243 u32 i, reg, rx_pba_size; 252 u32 i, reg, rx_pba_size;
244 253
245 /* If PFC is disabled globally then fall back to LFC. */ 254 /* If PFC is disabled globally then fall back to LFC. */
246 if (!dcb_config->pfc_mode_enable) { 255 if (!pfc_en) {
247 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 256 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
248 hw->mac.ops.fc_enable(hw, i); 257 hw->mac.ops.fc_enable(hw, i);
249 goto out; 258 goto out;
@@ -251,19 +260,18 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
251 260
252 /* Configure PFC Tx thresholds per TC */ 261 /* Configure PFC Tx thresholds per TC */
253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 262 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
263 int enabled = pfc_en & (1 << i);
254 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); 264 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
255 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; 265 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
256 266
257 reg = (rx_pba_size - hw->fc.low_water) << 10; 267 reg = (rx_pba_size - hw->fc.low_water) << 10;
258 268
259 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 269 if (enabled)
260 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
261 reg |= IXGBE_FCRTL_XONE; 270 reg |= IXGBE_FCRTL_XONE;
262 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 271 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
263 272
264 reg = (rx_pba_size - hw->fc.high_water) << 10; 273 reg = (rx_pba_size - hw->fc.high_water) << 10;
265 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 274 if (enabled)
266 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
267 reg |= IXGBE_FCRTH_FCEN; 275 reg |= IXGBE_FCRTH_FCEN;
268 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); 276 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
269 } 277 }
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
349/** 357/**
350 * ixgbe_dcb_config_82599 - Configure general DCB parameters 358 * ixgbe_dcb_config_82599 - Configure general DCB parameters
351 * @hw: pointer to hardware structure 359 * @hw: pointer to hardware structure
352 * @dcb_config: pointer to ixgbe_dcb_config structure
353 * 360 *
354 * Configure general DCB parameters. 361 * Configure general DCB parameters.
355 */ 362 */
@@ -406,19 +413,27 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
406/** 413/**
407 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB 414 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
408 * @hw: pointer to hardware structure 415 * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
410 * 422 *
411 * Configure dcb settings and enable dcb mode. 423 * Configure dcb settings and enable dcb mode.
412 */ 424 */
413s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 425s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
414 struct ixgbe_dcb_config *dcb_config) 426 u8 rx_pba, u8 pfc_en, u16 *refill,
427 u16 *max, u8 *bwg_id, u8 *prio_type)
415{ 428{
416 ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config); 429 ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
417 ixgbe_dcb_config_82599(hw); 430 ixgbe_dcb_config_82599(hw);
418 ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); 431 ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type);
419 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); 432 ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
420 ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); 433 bwg_id, prio_type);
421 ixgbe_dcb_config_pfc_82599(hw, dcb_config); 434 ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
435 bwg_id, prio_type);
436 ixgbe_dcb_config_pfc_82599(hw, pfc_en);
422 ixgbe_dcb_config_tc_stats_82599(hw); 437 ixgbe_dcb_config_tc_stats_82599(hw);
423 438
424 return 0; 439 return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649fb95..0b39ab4ffc7 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -102,11 +102,29 @@
102/* DCB hardware-specific driver APIs */ 102/* DCB hardware-specific driver APIs */
103 103
104/* DCB PFC functions */ 104/* DCB PFC functions */
105s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 105s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
106 struct ixgbe_dcb_config *dcb_config);
107 106
108/* DCB hw initialization */ 107/* DCB hw initialization */
108s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
109 u16 *refill,
110 u16 *max,
111 u8 *bwg_id,
112 u8 *prio_type);
113
114s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
115 u16 *refill,
116 u16 *max,
117 u8 *bwg_id,
118 u8 *prio_type);
119
120s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
121 u16 *refill,
122 u16 *max,
123 u8 *bwg_id,
124 u8 *prio_type);
125
109s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 126s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
110 struct ixgbe_dcb_config *config); 127 u8 rx_pba, u8 pfc_en, u16 *refill,
128 u16 *max, u8 *bwg_id, u8 *prio_type);
111 129
112#endif /* _DCB_82599_CONFIG_H */ 130#endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bf566e8a455..d7f0024014b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -37,7 +37,6 @@
37#define BIT_PG_RX 0x04 37#define BIT_PG_RX 0x04
38#define BIT_PG_TX 0x08 38#define BIT_PG_TX 0x08
39#define BIT_APP_UPCHG 0x10 39#define BIT_APP_UPCHG 0x10
40#define BIT_RESETLINK 0x40
41#define BIT_LINKSPEED 0x80 40#define BIT_LINKSPEED 0x80
42 41
43/* Responses for the DCB_C_SET_ALL command */ 42/* Responses for the DCB_C_SET_ALL command */
@@ -225,10 +224,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
225 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != 224 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
226 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || 225 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
227 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != 226 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
228 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { 227 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
229 adapter->dcb_set_bitmap |= BIT_PG_TX; 228 adapter->dcb_set_bitmap |= BIT_PG_TX;
230 adapter->dcb_set_bitmap |= BIT_RESETLINK;
231 }
232} 229}
233 230
234static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, 231static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +236,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
239 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; 236 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
240 237
241 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != 238 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
242 adapter->dcb_cfg.bw_percentage[0][bwg_id]) { 239 adapter->dcb_cfg.bw_percentage[0][bwg_id])
243 adapter->dcb_set_bitmap |= BIT_PG_TX; 240 adapter->dcb_set_bitmap |= BIT_PG_TX;
244 adapter->dcb_set_bitmap |= BIT_RESETLINK;
245 }
246} 241}
247 242
248static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, 243static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +264,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
269 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != 264 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
270 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || 265 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
271 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != 266 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
272 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { 267 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
273 adapter->dcb_set_bitmap |= BIT_PG_RX; 268 adapter->dcb_set_bitmap |= BIT_PG_RX;
274 adapter->dcb_set_bitmap |= BIT_RESETLINK;
275 }
276} 269}
277 270
278static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, 271static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +276,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
283 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; 276 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
284 277
285 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != 278 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
286 adapter->dcb_cfg.bw_percentage[1][bwg_id]) { 279 adapter->dcb_cfg.bw_percentage[1][bwg_id])
287 adapter->dcb_set_bitmap |= BIT_PG_RX; 280 adapter->dcb_set_bitmap |= BIT_PG_RX;
288 adapter->dcb_set_bitmap |= BIT_RESETLINK;
289 }
290} 281}
291 282
292static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, 283static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -365,21 +356,17 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
365 return DCB_NO_HW_CHG; 356 return DCB_NO_HW_CHG;
366 357
367 /* 358 /*
368 * Only take down the adapter if the configuration change 359 * Only take down the adapter if an app change occurred. FCoE
369 * requires a reset. 360 * may shuffle tx rings in this case and this cannot be done
361 * without a reset currently.
370 */ 362 */
371 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 363 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
372 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 364 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
373 msleep(1); 365 msleep(1);
374 366
375 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 367 if (netif_running(netdev))
376 if (netif_running(netdev)) 368 netdev->netdev_ops->ndo_stop(netdev);
377 netdev->netdev_ops->ndo_stop(netdev); 369 ixgbe_clear_interrupt_scheme(adapter);
378 ixgbe_clear_interrupt_scheme(adapter);
379 } else {
380 if (netif_running(netdev))
381 ixgbe_down(adapter);
382 }
383 } 370 }
384 371
385 if (adapter->dcb_cfg.pfc_mode_enable) { 372 if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +395,51 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
408 } 395 }
409 } 396 }
410 397
411 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 398 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
412 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { 399 ixgbe_init_interrupt_scheme(adapter);
413 ixgbe_init_interrupt_scheme(adapter); 400 if (netif_running(netdev))
414 if (netif_running(netdev)) 401 netdev->netdev_ops->ndo_open(netdev);
415 netdev->netdev_ops->ndo_open(netdev);
416 } else {
417 if (netif_running(netdev))
418 ixgbe_up(adapter);
419 }
420 ret = DCB_HW_CHG_RST; 402 ret = DCB_HW_CHG_RST;
421 } else if (adapter->dcb_set_bitmap & BIT_PFC) { 403 }
422 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 404
423 ixgbe_dcb_config_pfc_82598(&adapter->hw, 405 if (adapter->dcb_set_bitmap & BIT_PFC) {
424 &adapter->dcb_cfg); 406 u8 pfc_en;
425 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 407 ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
426 ixgbe_dcb_config_pfc_82599(&adapter->hw, 408 ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
427 &adapter->dcb_cfg);
428 ret = DCB_HW_CHG; 409 ret = DCB_HW_CHG;
429 } 410 }
411
412 if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
413 u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
414 u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
415 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
416
417#ifdef CONFIG_FCOE
418 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
419 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
420#endif
421
422 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
423 max_frame, DCB_TX_CONFIG);
424 ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
425 max_frame, DCB_RX_CONFIG);
426
427 ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
428 DCB_TX_CONFIG, refill);
429 ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
430 ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
431 DCB_TX_CONFIG, bwg_id);
432 ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
433 DCB_TX_CONFIG, prio_type);
434
435 ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
436 bwg_id, prio_type);
437 }
438
430 if (adapter->dcb_cfg.pfc_mode_enable) 439 if (adapter->dcb_cfg.pfc_mode_enable)
431 adapter->hw.fc.current_mode = ixgbe_fc_pfc; 440 adapter->hw.fc.current_mode = ixgbe_fc_pfc;
432 441
433 if (adapter->dcb_set_bitmap & BIT_RESETLINK) 442 if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
434 clear_bit(__IXGBE_RESETTING, &adapter->state); 443 clear_bit(__IXGBE_RESETTING, &adapter->state);
435 adapter->dcb_set_bitmap = 0x00; 444 adapter->dcb_set_bitmap = 0x00;
436 return ret; 445 return ret;
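The CEE path above now reduces the per-class PFC configuration to a single enable bitmask via ixgbe_dcb_unpack_pfc() before handing it to ixgbe_dcb_hw_pfc_config(). The unpack helper's body is not part of this hunk; the sketch below is only a guess at its shape, assuming a per-TC dcb_pfc field with values like the pfc_enabled_full/pfc_enabled_tx constants seen earlier in the 82599 code.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TRAFFIC_CLASS 8

    /* Assumed per-TC PFC setting, mirroring the dcb_pfc values used above. */
    enum pfc_mode { pfc_disabled = 0, pfc_enabled_full, pfc_enabled_tx, pfc_enabled_rx };

    struct tc_cfg { enum pfc_mode dcb_pfc; };

    /* Collapse the per-TC settings into one enable bit per traffic class. */
    static uint8_t unpack_pfc(const struct tc_cfg *tc)
    {
        uint8_t pfc_en = 0;

        for (int i = 0; i < MAX_TRAFFIC_CLASS; i++)
            if (tc[i].dcb_pfc != pfc_disabled)
                pfc_en |= (uint8_t)(1u << i);
        return pfc_en;
    }

    int main(void)
    {
        struct tc_cfg cfg[MAX_TRAFFIC_CLASS] = { { pfc_disabled } };

        cfg[3].dcb_pfc = pfc_enabled_full;     /* example: PFC on TC3 and TC6 */
        cfg[6].dcb_pfc = pfc_enabled_tx;
        printf("pfc_en = 0x%02x\n", (unsigned)unpack_pfc(cfg));  /* prints 0x48 */
        return 0;
    }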
@@ -568,18 +577,29 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
568 case DCB_APP_IDTYPE_ETHTYPE: 577 case DCB_APP_IDTYPE_ETHTYPE:
569#ifdef IXGBE_FCOE 578#ifdef IXGBE_FCOE
570 if (id == ETH_P_FCOE) { 579 if (id == ETH_P_FCOE) {
571 u8 tc; 580 u8 old_tc;
572 struct ixgbe_adapter *adapter; 581 struct ixgbe_adapter *adapter = netdev_priv(netdev);
573 582
574 adapter = netdev_priv(netdev); 583 /* Get current programmed tc */
575 tc = adapter->fcoe.tc; 584 old_tc = adapter->fcoe.tc;
576 rval = ixgbe_fcoe_setapp(adapter, up); 585 rval = ixgbe_fcoe_setapp(adapter, up);
577 if ((!rval) && (tc != adapter->fcoe.tc) && 586
578 (adapter->flags & IXGBE_FLAG_DCB_ENABLED) && 587 if (rval ||
579 (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { 588 !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
589 !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
590 break;
591
592 /* The FCoE application priority may be changed multiple
593 * times in quick succession with switches that build up
594 * TLVs. To avoid creating unneeded device resets this
595 * checks the actual HW configuration and clears
596 * BIT_APP_UPCHG if a HW configuration change is not
597 * needed.
598 */
599 if (old_tc == adapter->fcoe.tc)
600 adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
601 else
580 adapter->dcb_set_bitmap |= BIT_APP_UPCHG; 602 adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
581 adapter->dcb_set_bitmap |= BIT_RESETLINK;
582 }
583 } 603 }
584#endif 604#endif
585 break; 605 break;
@@ -591,7 +611,98 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
591 return rval; 611 return rval;
592} 612}
593 613
614static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
615 struct ieee_ets *ets)
616{
617 struct ixgbe_adapter *adapter = netdev_priv(dev);
618 struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
619
620 /* No IEEE ETS settings available */
621 if (!my_ets)
622 return -EINVAL;
623
624 ets->ets_cap = MAX_TRAFFIC_CLASS;
625 ets->cbs = my_ets->cbs;
626 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
627 memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
628 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
629 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
630 return 0;
631}
632
633static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
634 struct ieee_ets *ets)
635{
636 struct ixgbe_adapter *adapter = netdev_priv(dev);
637 __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
638 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
639 int err;
640 /* naively give each TC a bwg to map onto CEE hardware */
641 __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
642
643 if (!adapter->ixgbe_ieee_ets) {
644 adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
645 GFP_KERNEL);
646 if (!adapter->ixgbe_ieee_ets)
647 return -ENOMEM;
648 }
649
650
651 memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
652
653 ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
654 err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
655 bwg_id, ets->tc_tsa);
656 return err;
657}
658
659static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
660 struct ieee_pfc *pfc)
661{
662 struct ixgbe_adapter *adapter = netdev_priv(dev);
663 struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
664 int i;
665
666 /* No IEEE PFC settings available */
667 if (!my_pfc)
668 return -EINVAL;
669
670 pfc->pfc_cap = MAX_TRAFFIC_CLASS;
671 pfc->pfc_en = my_pfc->pfc_en;
672 pfc->mbc = my_pfc->mbc;
673 pfc->delay = my_pfc->delay;
674
675 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
676 pfc->requests[i] = adapter->stats.pxoffrxc[i];
677 pfc->indications[i] = adapter->stats.pxofftxc[i];
678 }
679
680 return 0;
681}
682
683static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
684 struct ieee_pfc *pfc)
685{
686 struct ixgbe_adapter *adapter = netdev_priv(dev);
687 int err;
688
689 if (!adapter->ixgbe_ieee_pfc) {
690 adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
691 GFP_KERNEL);
692 if (!adapter->ixgbe_ieee_pfc)
693 return -ENOMEM;
694 }
695
696 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
697 err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
698 return err;
699}
700
594const struct dcbnl_rtnl_ops dcbnl_ops = { 701const struct dcbnl_rtnl_ops dcbnl_ops = {
702 .ieee_getets = ixgbe_dcbnl_ieee_getets,
703 .ieee_setets = ixgbe_dcbnl_ieee_setets,
704 .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
705 .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
595 .getstate = ixgbe_dcbnl_get_state, 706 .getstate = ixgbe_dcbnl_get_state,
596 .setstate = ixgbe_dcbnl_set_state, 707 .setstate = ixgbe_dcbnl_set_state,
597 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, 708 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
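ixgbe_dcbnl_ieee_setets() above runs the IEEE tc_tx_bw percentages through ixgbe_ieee_credits() to obtain refill/max credit arrays before calling ixgbe_dcb_hw_ets_config(). That helper is not shown in this diff, so the sketch below uses a made-up scaling purely to illustrate the idea of turning bandwidth shares and a maximum frame size into per-TC credits; it is not the driver's formula.

    #include <stdint.h>
    #include <stdio.h>

    #define IEEE_8021QAZ_MAX_TCS 8

    /*
     * Toy credit calculation: give each TC a refill quantum proportional to
     * its bandwidth share, never less than one max-sized frame (in 64-byte
     * credit units), and cap the burst ("max") at a fixed multiple.
     */
    static void ieee_credits_sketch(const uint8_t *bw_pct, uint16_t *refill,
                                    uint16_t *max, int max_frame)
    {
        int min_credit = max_frame / 64 + 1;   /* ~one full frame, rounded up */

        for (int tc = 0; tc < IEEE_8021QAZ_MAX_TCS; tc++) {
            int r = bw_pct[tc] * min_credit;

            refill[tc] = (uint16_t)(r > min_credit ? r : min_credit);
            max[tc] = (uint16_t)(refill[tc] * 8); /* arbitrary burst ceiling */
        }
    }

    int main(void)
    {
        uint8_t bw[IEEE_8021QAZ_MAX_TCS] = { 50, 30, 10, 10, 0, 0, 0, 0 };
        uint16_t refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];

        ieee_credits_sketch(bw, refill, max, 1500 + 14 + 4); /* MTU + L2 overhead */
        for (int tc = 0; tc < IEEE_8021QAZ_MAX_TCS; tc++)
            printf("tc%d: refill=%d max=%d\n", tc, refill[tc], max[tc]);
        return 0;
    }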
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2002ea88ca2..83511c02292 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -152,7 +152,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
152 ecmd->supported |= (SUPPORTED_1000baseT_Full | 152 ecmd->supported |= (SUPPORTED_1000baseT_Full |
153 SUPPORTED_Autoneg); 153 SUPPORTED_Autoneg);
154 154
155 switch (hw->mac.type) {
156 case ixgbe_mac_X540:
157 ecmd->supported |= SUPPORTED_100baseT_Full;
158 break;
159 default:
160 break;
161 }
162
155 ecmd->advertising = ADVERTISED_Autoneg; 163 ecmd->advertising = ADVERTISED_Autoneg;
164 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
165 ecmd->advertising |= ADVERTISED_100baseT_Full;
156 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 166 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
157 ecmd->advertising |= ADVERTISED_10000baseT_Full; 167 ecmd->advertising |= ADVERTISED_10000baseT_Full;
158 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) 168 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
@@ -167,6 +177,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
167 ecmd->advertising |= (ADVERTISED_10000baseT_Full | 177 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
168 ADVERTISED_1000baseT_Full); 178 ADVERTISED_1000baseT_Full);
169 179
180 switch (hw->mac.type) {
181 case ixgbe_mac_X540:
182 if (!(ecmd->advertising & ADVERTISED_100baseT_Full))
183 ecmd->advertising |= (ADVERTISED_100baseT_Full);
184 break;
185 default:
186 break;
187 }
188
170 if (hw->phy.media_type == ixgbe_media_type_copper) { 189 if (hw->phy.media_type == ixgbe_media_type_copper) {
171 ecmd->supported |= SUPPORTED_TP; 190 ecmd->supported |= SUPPORTED_TP;
172 ecmd->advertising |= ADVERTISED_TP; 191 ecmd->advertising |= ADVERTISED_TP;
@@ -271,8 +290,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
271 290
272 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 291 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
273 if (link_up) { 292 if (link_up) {
274 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 293 switch (link_speed) {
275 SPEED_10000 : SPEED_1000; 294 case IXGBE_LINK_SPEED_10GB_FULL:
295 ecmd->speed = SPEED_10000;
296 break;
297 case IXGBE_LINK_SPEED_1GB_FULL:
298 ecmd->speed = SPEED_1000;
299 break;
300 case IXGBE_LINK_SPEED_100_FULL:
301 ecmd->speed = SPEED_100;
302 break;
303 default:
304 break;
305 }
276 ecmd->duplex = DUPLEX_FULL; 306 ecmd->duplex = DUPLEX_FULL;
277 } else { 307 } else {
278 ecmd->speed = -1; 308 ecmd->speed = -1;
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index c54a88274d5..27203c87ea1 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 65cc8fb14fe..02a00d2415d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 30f9ccfb4f8..32231ffe071 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -54,7 +54,8 @@ static const char ixgbe_driver_string[] =
54 54
55#define DRV_VERSION "3.2.9-k2" 55#define DRV_VERSION "3.2.9-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static const char ixgbe_copyright[] =
58 "Copyright (c) 1999-2011 Intel Corporation.";
58 59
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 60static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 61 [board_82598] = &ixgbe_82598_info,
@@ -648,7 +649,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
648 * 649 *
649 * Returns : a tc index for use in range 0-7, or 0-3 650 * Returns : a tc index for use in range 0-7, or 0-3
650 */ 651 */
651u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) 652static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
652{ 653{
653 int tc = -1; 654 int tc = -1;
654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 655 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -2597,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2597 2598
2598 i--; 2599 i--;
2599 for (; i >= 0; i--) { 2600 for (; i >= 0; i--) {
2601 /* free only the irqs that were actually requested */
2602 if (!adapter->q_vector[i]->rxr_count &&
2603 !adapter->q_vector[i]->txr_count)
2604 continue;
2605
2600 free_irq(adapter->msix_entries[i].vector, 2606 free_irq(adapter->msix_entries[i].vector,
2601 adapter->q_vector[i]); 2607 adapter->q_vector[i]);
2602 } 2608 }
@@ -3077,6 +3083,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3077 ixgbe_configure_srrctl(adapter, ring); 3083 ixgbe_configure_srrctl(adapter, ring);
3078 ixgbe_configure_rscctl(adapter, ring); 3084 ixgbe_configure_rscctl(adapter, ring);
3079 3085
3086 /* If operating in IOV mode set RLPML for X540 */
3087 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3088 hw->mac.type == ixgbe_mac_X540) {
3089 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3090 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3091 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3092 }
3093
3080 if (hw->mac.type == ixgbe_mac_82598EB) { 3094 if (hw->mac.type == ixgbe_mac_82598EB) {
3081 /* 3095 /*
3082 * enable cache line friendly hardware writes: 3096 * enable cache line friendly hardware writes:
@@ -3876,7 +3890,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3876 * If we're not hot-pluggable SFP+, we just need to configure link 3890 * If we're not hot-pluggable SFP+, we just need to configure link
3877 * and bring it up. 3891 * and bring it up.
3878 */ 3892 */
3879 if (hw->phy.type == ixgbe_phy_unknown) 3893 if (hw->phy.type == ixgbe_phy_none)
3880 schedule_work(&adapter->sfp_config_module_task); 3894 schedule_work(&adapter->sfp_config_module_task);
3881 3895
3882 /* enable transmits */ 3896 /* enable transmits */
@@ -5174,7 +5188,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5174 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; 5188 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
5175 adapter->dcb_cfg.rx_pba_cfg = pba_equal; 5189 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
5176 adapter->dcb_cfg.pfc_mode_enable = false; 5190 adapter->dcb_cfg.pfc_mode_enable = false;
5177 adapter->dcb_cfg.round_robin_enable = false;
5178 adapter->dcb_set_bitmap = 0x00; 5191 adapter->dcb_set_bitmap = 0x00;
5179 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, 5192 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
5180 adapter->ring_feature[RING_F_DCB].indices); 5193 adapter->ring_feature[RING_F_DCB].indices);
@@ -5442,8 +5455,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5442 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5455 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5443 5456
5444 /* MTU < 68 is an error and causes problems on some kernels */ 5457 /* MTU < 68 is an error and causes problems on some kernels */
5445 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 5458 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
5446 return -EINVAL; 5459 hw->mac.type != ixgbe_mac_X540) {
5460 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
5461 return -EINVAL;
5462 } else {
5463 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5464 return -EINVAL;
5465 }
5447 5466
5448 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5467 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5449 /* must set new MTU before calling down or up */ 5468 /* must set new MTU before calling down or up */
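The ixgbe_change_mtu() hunk above applies a tighter frame-size ceiling when SR-IOV is enabled on pre-X540 hardware, since those VFs cannot receive jumbo frames. A stand-alone sketch of that dual-limit check follows; the two limit constants are assumptions filled in from common Ethernet frame sizes, not quoted from the ixgbe headers.

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_HLEN                14
    #define ETH_FCS_LEN             4
    #define MAX_VLAN_FRAME          1522   /* 1500 MTU + L2 header + FCS + VLAN tag */
    #define MAX_JUMBO_FRAME         9728   /* assumed jumbo ceiling, for illustration */

    /* Returns true if new_mtu is acceptable for the given mode. */
    static bool mtu_ok(int new_mtu, bool sriov_without_jumbo)
    {
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        int limit = sriov_without_jumbo ? MAX_VLAN_FRAME : MAX_JUMBO_FRAME;

        return new_mtu >= 68 && max_frame <= limit;
    }

    int main(void)
    {
        printf("MTU 9000, SR-IOV on pre-X540: %d\n", mtu_ok(9000, true));  /* 0 */
        printf("MTU 9000, no SR-IOV:          %d\n", mtu_ok(9000, false)); /* 1 */
        printf("MTU 60 (below minimum):       %d\n", mtu_ok(60, false));   /* 0 */
        return 0;
    }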
@@ -5611,6 +5630,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5611 } 5630 }
5612 5631
5613 ixgbe_clear_interrupt_scheme(adapter); 5632 ixgbe_clear_interrupt_scheme(adapter);
5633#ifdef CONFIG_DCB
5634 kfree(adapter->ixgbe_ieee_pfc);
5635 kfree(adapter->ixgbe_ieee_ets);
5636#endif
5614 5637
5615#ifdef CONFIG_PM 5638#ifdef CONFIG_PM
5616 retval = pci_save_state(pdev); 5639 retval = pci_save_state(pdev);
@@ -6101,7 +6124,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
6101 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6124 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6102 "10 Gbps" : 6125 "10 Gbps" :
6103 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 6126 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6104 "1 Gbps" : "unknown speed")), 6127 "1 Gbps" :
6128 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6129 "100 Mbps" :
6130 "unknown speed"))),
6105 ((flow_rx && flow_tx) ? "RX/TX" : 6131 ((flow_rx && flow_tx) ? "RX/TX" :
6106 (flow_rx ? "RX" : 6132 (flow_rx ? "RX" :
6107 (flow_tx ? "TX" : "None")))); 6133 (flow_tx ? "TX" : "None"))));
@@ -7706,16 +7732,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
7706 7732
7707#endif /* CONFIG_IXGBE_DCA */ 7733#endif /* CONFIG_IXGBE_DCA */
7708 7734
7709/**
7710 * ixgbe_get_hw_dev return device
7711 * used by hardware layer to print debugging information
7712 **/
7713struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
7714{
7715 struct ixgbe_adapter *adapter = hw->back;
7716 return adapter->netdev;
7717}
7718
7719module_exit(ixgbe_exit_module); 7735module_exit(ixgbe_exit_module);
7720 7736
7721/* ixgbe_main.c */ 7737/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index ea82c5a1cd3..2acacfa5e37 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -437,6 +437,7 @@ out_no_read:
437 return ret_val; 437 return ret_val;
438} 438}
439 439
440#ifdef CONFIG_PCI_IOV
440/** 441/**
441 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox 442 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
442 * @hw: pointer to the HW structure 443 * @hw: pointer to the HW structure
@@ -465,6 +466,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
465 break; 466 break;
466 } 467 }
467} 468}
469#endif /* CONFIG_PCI_IOV */
468 470
469struct ixgbe_mbx_operations mbx_ops_generic = { 471struct ixgbe_mbx_operations mbx_ops_generic = {
470 .read = ixgbe_read_mbx_pf, 472 .read = ixgbe_read_mbx_pf,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 3df9b159021..fe6ea81dc7f 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); 86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); 87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
89#ifdef CONFIG_PCI_IOV
89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 90void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
91#endif /* CONFIG_PCI_IOV */
90 92
91extern struct ixgbe_mbx_operations mbx_ops_generic; 93extern struct ixgbe_mbx_operations mbx_ops_generic;
92 94
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8f7123e8fc0..197230b2d1a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -138,17 +138,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
138 **/ 138 **/
139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
140{ 140{
141 u32 i;
142 u16 ctrl = 0;
143 s32 status = 0;
144
145 if (hw->phy.type == ixgbe_phy_unknown)
146 status = ixgbe_identify_phy_generic(hw);
147
148 if (status != 0 || hw->phy.type == ixgbe_phy_none)
149 goto out;
150
141 /* Don't reset PHY if it's shut down due to overtemp. */ 151 /* Don't reset PHY if it's shut down due to overtemp. */
142 if (!hw->phy.reset_if_overtemp && 152 if (!hw->phy.reset_if_overtemp &&
143 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) 153 (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
144 return 0; 154 goto out;
145 155
146 /* 156 /*
147 * Perform soft PHY reset to the PHY_XS. 157 * Perform soft PHY reset to the PHY_XS.
148 * This will cause a soft reset to the PHY 158 * This will cause a soft reset to the PHY
149 */ 159 */
150 return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, 160 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
151 MDIO_CTRL1_RESET); 161 MDIO_MMD_PHYXS,
162 MDIO_CTRL1_RESET);
163
164 /*
165 * Poll for reset bit to self-clear indicating reset is complete.
166 * Some PHYs could take up to 3 seconds to complete and need about
167 * 1.7 usec delay after the reset is complete.
168 */
169 for (i = 0; i < 30; i++) {
170 msleep(100);
171 hw->phy.ops.read_reg(hw, MDIO_CTRL1,
172 MDIO_MMD_PHYXS, &ctrl);
173 if (!(ctrl & MDIO_CTRL1_RESET)) {
174 udelay(2);
175 break;
176 }
177 }
178
179 if (ctrl & MDIO_CTRL1_RESET) {
180 status = IXGBE_ERR_RESET_FAILED;
181 hw_dbg(hw, "PHY reset polling failed to complete.\n");
182 }
183
184out:
185 return status;
152} 186}
153 187
154/** 188/**
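ixgbe_reset_phy_generic() now polls the PHY_XS control register until the self-clearing reset bit drops instead of returning immediately after issuing the reset. The bounded poll-with-sleep pattern is reproduced below as a user-space sketch with a stubbed register read, so the MDIO details are placeholders.

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define CTRL_RESET 0x8000u            /* stand-in for MDIO_CTRL1_RESET */

    /* Stubbed register read: pretend the reset bit clears on the third read. */
    static uint16_t fake_read_ctrl(void)
    {
        static int reads;
        return (++reads < 3) ? CTRL_RESET : 0;
    }

    static int reset_phy_sketch(void)
    {
        uint16_t ctrl = CTRL_RESET;       /* "write" the self-clearing reset bit */

        /* Poll up to 30 x 100 ms for the bit to drop, as in the hunk above. */
        for (int i = 0; i < 30; i++) {
            usleep(100 * 1000);
            ctrl = fake_read_ctrl();
            if (!(ctrl & CTRL_RESET))
                break;
        }
        if (ctrl & CTRL_RESET) {
            fprintf(stderr, "PHY reset polling failed to complete\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return reset_phy_sketch() ? 1 : 0;
    }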
@@ -171,7 +205,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
171 else 205 else
172 gssr = IXGBE_GSSR_PHY0_SM; 206 gssr = IXGBE_GSSR_PHY0_SM;
173 207
174 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 208 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
175 status = IXGBE_ERR_SWFW_SYNC; 209 status = IXGBE_ERR_SWFW_SYNC;
176 210
177 if (status == 0) { 211 if (status == 0) {
@@ -243,7 +277,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
243 } 277 }
244 } 278 }
245 279
246 ixgbe_release_swfw_sync(hw, gssr); 280 hw->mac.ops.release_swfw_sync(hw, gssr);
247 } 281 }
248 282
249 return status; 283 return status;
@@ -269,7 +303,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
269 else 303 else
270 gssr = IXGBE_GSSR_PHY0_SM; 304 gssr = IXGBE_GSSR_PHY0_SM;
271 305
272 if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) 306 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
273 status = IXGBE_ERR_SWFW_SYNC; 307 status = IXGBE_ERR_SWFW_SYNC;
274 308
275 if (status == 0) { 309 if (status == 0) {
@@ -336,7 +370,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
336 } 370 }
337 } 371 }
338 372
339 ixgbe_release_swfw_sync(hw, gssr); 373 hw->mac.ops.release_swfw_sync(hw, gssr);
340 } 374 }
341 375
342 return status; 376 return status;
@@ -556,11 +590,10 @@ out:
556} 590}
557 591
558/** 592/**
559 * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns 593 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
560 * the PHY type.
561 * @hw: pointer to hardware structure 594 * @hw: pointer to hardware structure
562 * 595 *
563 * Searches for and indentifies the SFP module. Assings appropriate PHY type. 596 * Searches for and identifies the SFP module and assigns appropriate PHY type.
564 **/ 597 **/
565s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) 598s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
566{ 599{
@@ -581,41 +614,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
581 goto out; 614 goto out;
582 } 615 }
583 616
584 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, 617 status = hw->phy.ops.read_i2c_eeprom(hw,
618 IXGBE_SFF_IDENTIFIER,
585 &identifier); 619 &identifier);
586 620
587 if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) { 621 if (status == IXGBE_ERR_SWFW_SYNC ||
588 status = IXGBE_ERR_SFP_NOT_PRESENT; 622 status == IXGBE_ERR_I2C ||
589 hw->phy.sfp_type = ixgbe_sfp_type_not_present; 623 status == IXGBE_ERR_SFP_NOT_PRESENT)
590 if (hw->phy.type != ixgbe_phy_nl) { 624 goto err_read_i2c_eeprom;
591 hw->phy.id = 0;
592 hw->phy.type = ixgbe_phy_unknown;
593 }
594 goto out;
595 }
596 625
597 if (identifier == IXGBE_SFF_IDENTIFIER_SFP) { 626 /* LAN ID is needed for sfp_type determination */
598 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, 627 hw->mac.ops.set_lan_id(hw);
599 &comp_codes_1g); 628
600 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, 629 if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
601 &comp_codes_10g); 630 hw->phy.type = ixgbe_phy_sfp_unsupported;
602 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, 631 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
603 &cable_tech); 632 } else {
604 633 status = hw->phy.ops.read_i2c_eeprom(hw,
605 /* ID Module 634 IXGBE_SFF_1GBE_COMP_CODES,
606 * ========= 635 &comp_codes_1g);
607 * 0 SFP_DA_CU 636
608 * 1 SFP_SR 637 if (status == IXGBE_ERR_SWFW_SYNC ||
609 * 2 SFP_LR 638 status == IXGBE_ERR_I2C ||
610 * 3 SFP_DA_CORE0 - 82599-specific 639 status == IXGBE_ERR_SFP_NOT_PRESENT)
611 * 4 SFP_DA_CORE1 - 82599-specific 640 goto err_read_i2c_eeprom;
612 * 5 SFP_SR/LR_CORE0 - 82599-specific 641
613 * 6 SFP_SR/LR_CORE1 - 82599-specific 642 status = hw->phy.ops.read_i2c_eeprom(hw,
614 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific 643 IXGBE_SFF_10GBE_COMP_CODES,
615 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific 644 &comp_codes_10g);
616 * 9 SFP_1g_cu_CORE0 - 82599-specific 645
617 * 10 SFP_1g_cu_CORE1 - 82599-specific 646 if (status == IXGBE_ERR_SWFW_SYNC ||
618 */ 647 status == IXGBE_ERR_I2C ||
648 status == IXGBE_ERR_SFP_NOT_PRESENT)
649 goto err_read_i2c_eeprom;
650 status = hw->phy.ops.read_i2c_eeprom(hw,
651 IXGBE_SFF_CABLE_TECHNOLOGY,
652 &cable_tech);
653
654 if (status == IXGBE_ERR_SWFW_SYNC ||
655 status == IXGBE_ERR_I2C ||
656 status == IXGBE_ERR_SFP_NOT_PRESENT)
657 goto err_read_i2c_eeprom;
658
659 /* ID Module
660 * =========
661 * 0 SFP_DA_CU
662 * 1 SFP_SR
663 * 2 SFP_LR
664 * 3 SFP_DA_CORE0 - 82599-specific
665 * 4 SFP_DA_CORE1 - 82599-specific
666 * 5 SFP_SR/LR_CORE0 - 82599-specific
667 * 6 SFP_SR/LR_CORE1 - 82599-specific
668 * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
669 * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
670 * 9 SFP_1g_cu_CORE0 - 82599-specific
671 * 10 SFP_1g_cu_CORE1 - 82599-specific
672 */
619 if (hw->mac.type == ixgbe_mac_82598EB) { 673 if (hw->mac.type == ixgbe_mac_82598EB) {
620 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 674 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
621 hw->phy.sfp_type = ixgbe_sfp_type_da_cu; 675 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
@@ -647,31 +701,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
647 ixgbe_sfp_type_da_act_lmt_core1; 701 ixgbe_sfp_type_da_act_lmt_core1;
648 } else { 702 } else {
649 hw->phy.sfp_type = 703 hw->phy.sfp_type =
650 ixgbe_sfp_type_unknown; 704 ixgbe_sfp_type_unknown;
651 } 705 }
652 } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 706 } else if (comp_codes_10g &
707 (IXGBE_SFF_10GBASESR_CAPABLE |
708 IXGBE_SFF_10GBASELR_CAPABLE)) {
653 if (hw->bus.lan_id == 0) 709 if (hw->bus.lan_id == 0)
654 hw->phy.sfp_type = 710 hw->phy.sfp_type =
655 ixgbe_sfp_type_srlr_core0; 711 ixgbe_sfp_type_srlr_core0;
656 else 712 else
657 hw->phy.sfp_type = 713 hw->phy.sfp_type =
658 ixgbe_sfp_type_srlr_core1; 714 ixgbe_sfp_type_srlr_core1;
659 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 715 } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
660 if (hw->bus.lan_id == 0)
661 hw->phy.sfp_type =
662 ixgbe_sfp_type_srlr_core0;
663 else
664 hw->phy.sfp_type =
665 ixgbe_sfp_type_srlr_core1;
666 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
667 if (hw->bus.lan_id == 0) 716 if (hw->bus.lan_id == 0)
668 hw->phy.sfp_type = 717 hw->phy.sfp_type =
669 ixgbe_sfp_type_1g_cu_core0; 718 ixgbe_sfp_type_1g_cu_core0;
670 else 719 else
671 hw->phy.sfp_type = 720 hw->phy.sfp_type =
672 ixgbe_sfp_type_1g_cu_core1; 721 ixgbe_sfp_type_1g_cu_core1;
673 else 722 } else {
674 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 723 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
724 }
675 } 725 }
676 726
677 if (hw->phy.sfp_type != stored_sfp_type) 727 if (hw->phy.sfp_type != stored_sfp_type)
@@ -688,16 +738,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
688 /* Determine PHY vendor */ 738 /* Determine PHY vendor */
689 if (hw->phy.type != ixgbe_phy_nl) { 739 if (hw->phy.type != ixgbe_phy_nl) {
690 hw->phy.id = identifier; 740 hw->phy.id = identifier;
691 hw->phy.ops.read_i2c_eeprom(hw, 741 status = hw->phy.ops.read_i2c_eeprom(hw,
692 IXGBE_SFF_VENDOR_OUI_BYTE0, 742 IXGBE_SFF_VENDOR_OUI_BYTE0,
693 &oui_bytes[0]); 743 &oui_bytes[0]);
694 hw->phy.ops.read_i2c_eeprom(hw, 744
745 if (status == IXGBE_ERR_SWFW_SYNC ||
746 status == IXGBE_ERR_I2C ||
747 status == IXGBE_ERR_SFP_NOT_PRESENT)
748 goto err_read_i2c_eeprom;
749
750 status = hw->phy.ops.read_i2c_eeprom(hw,
695 IXGBE_SFF_VENDOR_OUI_BYTE1, 751 IXGBE_SFF_VENDOR_OUI_BYTE1,
696 &oui_bytes[1]); 752 &oui_bytes[1]);
697 hw->phy.ops.read_i2c_eeprom(hw, 753
754 if (status == IXGBE_ERR_SWFW_SYNC ||
755 status == IXGBE_ERR_I2C ||
756 status == IXGBE_ERR_SFP_NOT_PRESENT)
757 goto err_read_i2c_eeprom;
758
759 status = hw->phy.ops.read_i2c_eeprom(hw,
698 IXGBE_SFF_VENDOR_OUI_BYTE2, 760 IXGBE_SFF_VENDOR_OUI_BYTE2,
699 &oui_bytes[2]); 761 &oui_bytes[2]);
700 762
763 if (status == IXGBE_ERR_SWFW_SYNC ||
764 status == IXGBE_ERR_I2C ||
765 status == IXGBE_ERR_SFP_NOT_PRESENT)
766 goto err_read_i2c_eeprom;
767
701 vendor_oui = 768 vendor_oui =
702 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | 769 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
703 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | 770 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
@@ -707,7 +774,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
707 case IXGBE_SFF_VENDOR_OUI_TYCO: 774 case IXGBE_SFF_VENDOR_OUI_TYCO:
708 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 775 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
709 hw->phy.type = 776 hw->phy.type =
710 ixgbe_phy_sfp_passive_tyco; 777 ixgbe_phy_sfp_passive_tyco;
711 break; 778 break;
712 case IXGBE_SFF_VENDOR_OUI_FTL: 779 case IXGBE_SFF_VENDOR_OUI_FTL:
713 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 780 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
@@ -724,7 +791,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
724 default: 791 default:
725 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 792 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
726 hw->phy.type = 793 hw->phy.type =
727 ixgbe_phy_sfp_passive_unknown; 794 ixgbe_phy_sfp_passive_unknown;
728 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) 795 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
729 hw->phy.type = 796 hw->phy.type =
730 ixgbe_phy_sfp_active_unknown; 797 ixgbe_phy_sfp_active_unknown;
@@ -734,7 +801,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
734 } 801 }
735 } 802 }
736 803
737 /* All passive DA cables are supported */ 804 /* Allow any DA cable vendor */
738 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | 805 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
739 IXGBE_SFF_DA_ACTIVE_CABLE)) { 806 IXGBE_SFF_DA_ACTIVE_CABLE)) {
740 status = 0; 807 status = 0;
@@ -776,15 +843,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
776 843
777out: 844out:
778 return status; 845 return status;
846
847err_read_i2c_eeprom:
848 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
849 if (hw->phy.type != ixgbe_phy_nl) {
850 hw->phy.id = 0;
851 hw->phy.type = ixgbe_phy_unknown;
852 }
853 return IXGBE_ERR_SFP_NOT_PRESENT;
779} 854}
780 855
781/** 856/**
782 * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see 857 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
783 * if it supports a given SFP+ module type, if so it returns the offsets to the
784 * phy init sequence block.
785 * @hw: pointer to hardware structure 858 * @hw: pointer to hardware structure
786 * @list_offset: offset to the SFP ID list 859 * @list_offset: offset to the SFP ID list
787 * @data_offset: offset to the SFP data block 860 * @data_offset: offset to the SFP data block
861 *
862 * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
863 * so it returns the offsets to the phy init sequence block.
788 **/ 864 **/
789s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, 865s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
790 u16 *list_offset, 866 u16 *list_offset,
@@ -899,11 +975,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
899 u8 dev_addr, u8 *data) 975 u8 dev_addr, u8 *data)
900{ 976{
901 s32 status = 0; 977 s32 status = 0;
902 u32 max_retry = 1; 978 u32 max_retry = 10;
903 u32 retry = 0; 979 u32 retry = 0;
980 u16 swfw_mask = 0;
904 bool nack = 1; 981 bool nack = 1;
905 982
983 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
984 swfw_mask = IXGBE_GSSR_PHY1_SM;
985 else
986 swfw_mask = IXGBE_GSSR_PHY0_SM;
987
906 do { 988 do {
989 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
990 status = IXGBE_ERR_SWFW_SYNC;
991 goto read_byte_out;
992 }
993
907 ixgbe_i2c_start(hw); 994 ixgbe_i2c_start(hw);
908 995
909 /* Device Address and write indication */ 996 /* Device Address and write indication */
@@ -946,6 +1033,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
946 break; 1033 break;
947 1034
948fail: 1035fail:
1036 ixgbe_release_swfw_sync(hw, swfw_mask);
1037 msleep(100);
949 ixgbe_i2c_bus_clear(hw); 1038 ixgbe_i2c_bus_clear(hw);
950 retry++; 1039 retry++;
951 if (retry < max_retry) 1040 if (retry < max_retry)
@@ -955,6 +1044,9 @@ fail:
955 1044
956 } while (retry < max_retry); 1045 } while (retry < max_retry);
957 1046
1047 ixgbe_release_swfw_sync(hw, swfw_mask);
1048
1049read_byte_out:
958 return status; 1050 return status;
959} 1051}
960 1052
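The I2C byte-read path above now takes the PHY software/firmware semaphore before each attempt, releases it and backs off on failure, and retries up to ten times. A generic sketch of that lock-retry-release shape follows, with a pthread mutex standing in for the hardware semaphore and a stubbed transfer (build with -lpthread):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* A mutex stands in for the GSSR_PHYx_SM hardware semaphore. */
    static pthread_mutex_t swfw_sem = PTHREAD_MUTEX_INITIALIZER;

    /* Stubbed transfer: fail the first attempt, succeed on the second. */
    static bool i2c_transfer_stub(void)
    {
        static int calls;
        return ++calls > 1;
    }

    static int read_byte_sketch(void)
    {
        const int max_retry = 10;

        for (int retry = 0; retry < max_retry; retry++) {
            if (pthread_mutex_lock(&swfw_sem) != 0)
                return -1;                   /* could not get the semaphore */

            bool ok = i2c_transfer_stub();

            pthread_mutex_unlock(&swfw_sem); /* drop the lock after every try */
            if (ok)
                return 0;
            usleep(100 * 1000);              /* back off before the next attempt */
        }
        return -1;
    }

    int main(void)
    {
        printf("read_byte_sketch() = %d\n", read_byte_sketch());
        return 0;
    }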
@@ -973,6 +1065,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
973 s32 status = 0; 1065 s32 status = 0;
974 u32 max_retry = 1; 1066 u32 max_retry = 1;
975 u32 retry = 0; 1067 u32 retry = 0;
1068 u16 swfw_mask = 0;
1069
1070 if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1071 swfw_mask = IXGBE_GSSR_PHY1_SM;
1072 else
1073 swfw_mask = IXGBE_GSSR_PHY0_SM;
1074
1075 if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
1076 status = IXGBE_ERR_SWFW_SYNC;
1077 goto write_byte_out;
1078 }
976 1079
977 do { 1080 do {
978 ixgbe_i2c_start(hw); 1081 ixgbe_i2c_start(hw);
@@ -1013,6 +1116,9 @@ fail:
1013 hw_dbg(hw, "I2C byte write error.\n"); 1116 hw_dbg(hw, "I2C byte write error.\n");
1014 } while (retry < max_retry); 1117 } while (retry < max_retry);
1015 1118
1119 ixgbe_release_swfw_sync(hw, swfw_mask);
1120
1121write_byte_out:
1016 return status; 1122 return status;
1017} 1123}
1018 1124
@@ -1331,6 +1437,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1331 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); 1437 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1332 u32 i; 1438 u32 i;
1333 1439
1440 ixgbe_i2c_start(hw);
1441
1334 ixgbe_set_i2c_data(hw, &i2cctl, 1); 1442 ixgbe_set_i2c_data(hw, &i2cctl, 1);
1335 1443
1336 for (i = 0; i < 9; i++) { 1444 for (i = 0; i < 9; i++) {
@@ -1345,6 +1453,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1345 udelay(IXGBE_I2C_T_LOW); 1453 udelay(IXGBE_I2C_T_LOW);
1346 } 1454 }
1347 1455
1456 ixgbe_i2c_start(hw);
1457
1348 /* Put the i2c bus back to default state */ 1458 /* Put the i2c bus back to default state */
1349 ixgbe_i2c_stop(hw); 1459 ixgbe_i2c_stop(hw);
1350} 1460}
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index e2c6b7eac64..2327baf0442 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 187b3a16ec1..58c9b45989f 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -110,6 +110,33 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
111} 111}
112 112
113void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
114{
115 struct ixgbe_hw *hw = &adapter->hw;
116 int new_mtu = msgbuf[1];
117 u32 max_frs;
118 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
119
120 /* Only X540 supports jumbo frames in IOV mode */
121 if (adapter->hw.mac.type != ixgbe_mac_X540)
122 return;
123
124 /* MTU < 68 is an error and causes problems on some kernels */
125 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
126 e_err(drv, "VF mtu %d out of range\n", new_mtu);
127 return;
128 }
129
130 max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
131 IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
132 if (max_frs < new_mtu) {
133 max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
134 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
135 }
136
137 e_info(hw, "VF requests change of max MTU to %d\n", new_mtu);
138}
139
113static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 140static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
114{ 141{
115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 142 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
@@ -302,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
302 hash_list, vf); 329 hash_list, vf);
303 break; 330 break;
304 case IXGBE_VF_SET_LPE: 331 case IXGBE_VF_SET_LPE:
305 WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE); 332 ixgbe_set_vf_lpe(adapter, msgbuf);
306 break; 333 break;
307 case IXGBE_VF_SET_VLAN: 334 case IXGBE_VF_SET_VLAN:
308 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) 335 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
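ixgbe_set_vf_lpe() above converts a VF's requested MTU into an update of the MAXFRS register's MFS field, raising it only when needed and only on X540. The arithmetic in isolation is sketched below; the shift and mask values are assumptions matching a 16-bit field in the upper half of the register rather than values quoted from ixgbe_type.h.

    #include <stdint.h>
    #include <stdio.h>

    #define MFS_SHIFT 16                   /* assumed: MFS occupies the upper 16 bits */
    #define MFS_MASK  0xFFFF0000u

    /* Return the new MAXFRS value, or the old one if no increase is needed. */
    static uint32_t vf_lpe_sketch(uint32_t maxfrs_reg, uint32_t requested_mtu)
    {
        uint32_t cur_mfs = (maxfrs_reg & MFS_MASK) >> MFS_SHIFT;

        if (cur_mfs >= requested_mtu)
            return maxfrs_reg;             /* already large enough, leave it alone */
        return (maxfrs_reg & ~MFS_MASK) | (requested_mtu << MFS_SHIFT);
    }

    int main(void)
    {
        uint32_t reg = (uint32_t)1518 << MFS_SHIFT;  /* standard frames programmed */

        reg = vf_lpe_sketch(reg, 9000);              /* VF asks for a 9000-byte MTU */
        printf("MFS now %u bytes\n", (unsigned)((reg & MFS_MASK) >> MFS_SHIFT));
        return 0;
    }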
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 49dc14debef..e7dd029d576 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fd3358f5413..013751db5fc 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -91,7 +91,7 @@
91 91
92/* General Receive Control */ 92/* General Receive Control */
93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ 93#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
94#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */ 94#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
95 95
96#define IXGBE_VPDDIAG0 0x10204 96#define IXGBE_VPDDIAG0 0x10204
97#define IXGBE_VPDDIAG1 0x10208 97#define IXGBE_VPDDIAG1 0x10208
@@ -342,7 +342,7 @@
342/* Wake Up Control */ 342/* Wake Up Control */
343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ 343#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ 344#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
345#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/ 345#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
346 346
347/* Wake Up Filter Control */ 347/* Wake Up Filter Control */
348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ 348#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@@ -1614,6 +1614,8 @@
1614#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ 1614#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1615 1615
1616/* PCI Bus Info */ 1616/* PCI Bus Info */
1617#define IXGBE_PCI_DEVICE_STATUS 0xAA
1618#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
1617#define IXGBE_PCI_LINK_STATUS 0xB2 1619#define IXGBE_PCI_LINK_STATUS 0xB2
1618#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1620#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
1619#define IXGBE_PCI_LINK_WIDTH 0x3F0 1621#define IXGBE_PCI_LINK_WIDTH 0x3F0
@@ -1680,6 +1682,8 @@
1680#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 1682#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
1681#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 1683#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
1682#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 1684#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
1685#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
1686#define IXGBE_RXDCTL_RLPML_EN 0x00008000
1683 1687
1684#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ 1688#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
1685#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ 1689#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -2240,6 +2244,7 @@ enum ixgbe_mac_type {
2240 2244
2241enum ixgbe_phy_type { 2245enum ixgbe_phy_type {
2242 ixgbe_phy_unknown = 0, 2246 ixgbe_phy_unknown = 0,
2247 ixgbe_phy_none,
2243 ixgbe_phy_tn, 2248 ixgbe_phy_tn,
2244 ixgbe_phy_aq, 2249 ixgbe_phy_aq,
2245 ixgbe_phy_cu_unknown, 2250 ixgbe_phy_cu_unknown,
@@ -2328,32 +2333,31 @@ enum ixgbe_bus_type {
2328/* PCI bus speeds */ 2333/* PCI bus speeds */
2329enum ixgbe_bus_speed { 2334enum ixgbe_bus_speed {
2330 ixgbe_bus_speed_unknown = 0, 2335 ixgbe_bus_speed_unknown = 0,
2331 ixgbe_bus_speed_33, 2336 ixgbe_bus_speed_33 = 33,
2332 ixgbe_bus_speed_66, 2337 ixgbe_bus_speed_66 = 66,
2333 ixgbe_bus_speed_100, 2338 ixgbe_bus_speed_100 = 100,
2334 ixgbe_bus_speed_120, 2339 ixgbe_bus_speed_120 = 120,
2335 ixgbe_bus_speed_133, 2340 ixgbe_bus_speed_133 = 133,
2336 ixgbe_bus_speed_2500, 2341 ixgbe_bus_speed_2500 = 2500,
2337 ixgbe_bus_speed_5000, 2342 ixgbe_bus_speed_5000 = 5000,
2338 ixgbe_bus_speed_reserved 2343 ixgbe_bus_speed_reserved
2339}; 2344};
2340 2345
2341/* PCI bus widths */ 2346/* PCI bus widths */
2342enum ixgbe_bus_width { 2347enum ixgbe_bus_width {
2343 ixgbe_bus_width_unknown = 0, 2348 ixgbe_bus_width_unknown = 0,
2344 ixgbe_bus_width_pcie_x1, 2349 ixgbe_bus_width_pcie_x1 = 1,
2345 ixgbe_bus_width_pcie_x2, 2350 ixgbe_bus_width_pcie_x2 = 2,
2346 ixgbe_bus_width_pcie_x4 = 4, 2351 ixgbe_bus_width_pcie_x4 = 4,
2347 ixgbe_bus_width_pcie_x8 = 8, 2352 ixgbe_bus_width_pcie_x8 = 8,
2348 ixgbe_bus_width_32, 2353 ixgbe_bus_width_32 = 32,
2349 ixgbe_bus_width_64, 2354 ixgbe_bus_width_64 = 64,
2350 ixgbe_bus_width_reserved 2355 ixgbe_bus_width_reserved
2351}; 2356};
2352 2357
2353struct ixgbe_addr_filter_info { 2358struct ixgbe_addr_filter_info {
2354 u32 num_mc_addrs; 2359 u32 num_mc_addrs;
2355 u32 rar_used_count; 2360 u32 rar_used_count;
2356 u32 mc_addr_in_rar_count;
2357 u32 mta_in_use; 2361 u32 mta_in_use;
2358 u32 overflow_promisc; 2362 u32 overflow_promisc;
2359 bool uc_set_promisc; 2363 bool uc_set_promisc;
@@ -2491,6 +2495,8 @@ struct ixgbe_mac_operations {
2491 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); 2495 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
2492 s32 (*setup_sfp)(struct ixgbe_hw *); 2496 s32 (*setup_sfp)(struct ixgbe_hw *);
2493 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2497 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2498 s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
2499 void (*release_swfw_sync)(struct ixgbe_hw *, u16);
2494 2500
2495 /* Link */ 2501 /* Link */
2496 void (*disable_tx_laser)(struct ixgbe_hw *); 2502 void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2513,7 +2519,6 @@ struct ixgbe_mac_operations {
2513 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 2519 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
2514 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2520 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2515 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2521 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2516 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2517 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); 2522 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2518 s32 (*enable_mc)(struct ixgbe_hw *); 2523 s32 (*enable_mc)(struct ixgbe_hw *);
2519 s32 (*disable_mc)(struct ixgbe_hw *); 2524 s32 (*disable_mc)(struct ixgbe_hw *);
@@ -2554,6 +2559,7 @@ struct ixgbe_eeprom_info {
2554 u16 address_bits; 2559 u16 address_bits;
2555}; 2560};
2556 2561
2562#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
2557struct ixgbe_mac_info { 2563struct ixgbe_mac_info {
2558 struct ixgbe_mac_operations ops; 2564 struct ixgbe_mac_operations ops;
2559 enum ixgbe_mac_type type; 2565 enum ixgbe_mac_type type;
@@ -2564,6 +2570,8 @@ struct ixgbe_mac_info {
2564 u16 wwnn_prefix; 2570 u16 wwnn_prefix;
2565 /* prefix for World Wide Port Name (WWPN) */ 2571 /* prefix for World Wide Port Name (WWPN) */
2566 u16 wwpn_prefix; 2572 u16 wwpn_prefix;
2573#define IXGBE_MAX_MTA 128
2574 u32 mta_shadow[IXGBE_MAX_MTA];
2567 s32 mc_filter_type; 2575 s32 mc_filter_type;
2568 u32 mcft_size; 2576 u32 mcft_size;
2569 u32 vft_size; 2577 u32 vft_size;
@@ -2576,6 +2584,7 @@ struct ixgbe_mac_info {
2576 u32 orig_autoc2; 2584 u32 orig_autoc2;
2577 bool orig_link_settings_stored; 2585 bool orig_link_settings_stored;
2578 bool autotry_restart; 2586 bool autotry_restart;
2587 u8 flags;
2579}; 2588};
2580 2589
2581struct ixgbe_phy_info { 2590struct ixgbe_phy_info {
@@ -2682,7 +2691,6 @@ struct ixgbe_info {
2682#define IXGBE_ERR_EEPROM_VERSION -24 2691#define IXGBE_ERR_EEPROM_VERSION -24
2683#define IXGBE_ERR_NO_SPACE -25 2692#define IXGBE_ERR_NO_SPACE -25
2684#define IXGBE_ERR_OVERTEMP -26 2693#define IXGBE_ERR_OVERTEMP -26
2685#define IXGBE_ERR_RAR_INDEX -27
2686#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 2694#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2687#define IXGBE_ERR_PBA_SECTION -31 2695#define IXGBE_ERR_PBA_SECTION -31
2688#define IXGBE_ERR_INVALID_ARGUMENT -32 2696#define IXGBE_ERR_INVALID_ARGUMENT -32
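Editor's note: the ixgbe_bus_speed and ixgbe_bus_width enums in the hunk above now carry their literal numeric values, so callers can report link parameters without a translation table. A minimal, self-contained user-space sketch of that usage follows; the enum subset and the printf reporting are illustrative assumptions, not driver code.

#include <stdio.h>

/* Subset of the enums from the hunk above; each value now equals the
 * physical speed (MT/s) or lane count, so no lookup table is needed. */
enum ixgbe_bus_speed {
	ixgbe_bus_speed_unknown = 0,
	ixgbe_bus_speed_2500 = 2500,
	ixgbe_bus_speed_5000 = 5000,
};

enum ixgbe_bus_width {
	ixgbe_bus_width_unknown = 0,
	ixgbe_bus_width_pcie_x4 = 4,
	ixgbe_bus_width_pcie_x8 = 8,
};

int main(void)
{
	enum ixgbe_bus_speed speed = ixgbe_bus_speed_5000;
	enum ixgbe_bus_width width = ixgbe_bus_width_pcie_x8;

	/* The enum value itself is the number to print. */
	printf("PCIe link: %d MT/s, x%d\n", (int)speed, (int)width);
	return 0;
}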
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index f2518b01067..2e3a2b4fa8b 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation. 4 Copyright(c) 1999 - 2011 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,7 +31,6 @@
31 31
32#include "ixgbe.h" 32#include "ixgbe.h"
33#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
34//#include "ixgbe_mbx.h"
35 34
36#define IXGBE_X540_MAX_TX_QUEUES 128 35#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128 36#define IXGBE_X540_MAX_RX_QUEUES 128
@@ -110,12 +109,9 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
110 * Prevent the PCI-E bus from hanging by disabling PCI-E master 109
111 * access and verify no pending requests before reset 110 * access and verify no pending requests before reset
112 */ 111 */
113 status = ixgbe_disable_pcie_master(hw); 112 ixgbe_disable_pcie_master(hw);
114 if (status != 0) {
115 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
116 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
117 }
118 113
114mac_reset_top:
119 /* 115 /*
120 * Issue global reset to the MAC. Needs to be SW reset if link is up. 116 * Issue global reset to the MAC. Needs to be SW reset if link is up.
121 * If link reset is used when link is up, it might reset the PHY when 117 * If link reset is used when link is up, it might reset the PHY when
@@ -148,6 +144,19 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
148 hw_dbg(hw, "Reset polling failed to complete.\n"); 144 hw_dbg(hw, "Reset polling failed to complete.\n");
149 } 145 }
150 146
147 /*
148 * Double resets are required for recovery from certain error
149 * conditions. Between resets, it is necessary to stall to allow time
150 * for any pending HW events to complete. We use 1usec since that is
151 * what is needed for ixgbe_disable_pcie_master(). The second reset
152 * then clears out any effects of those events.
153 */
154 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
155 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
156 udelay(1);
157 goto mac_reset_top;
158 }
159
151 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ 160 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 161 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 162 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
@@ -191,7 +200,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
191 * clear the multicast table. Also reset num_rar_entries to 128, 200 * clear the multicast table. Also reset num_rar_entries to 128,
192 * since we modify this value when programming the SAN MAC address. 201 * since we modify this value when programming the SAN MAC address.
193 */ 202 */
194 hw->mac.num_rar_entries = 128; 203 hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
195 hw->mac.ops.init_rx_addrs(hw); 204 hw->mac.ops.init_rx_addrs(hw);
196 205
197 /* Store the permanent mac address */ 206 /* Store the permanent mac address */
@@ -278,7 +287,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
278{ 287{
279 s32 status; 288 s32 status;
280 289
281 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) 290 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
282 status = ixgbe_read_eerd_generic(hw, offset, data); 291 status = ixgbe_read_eerd_generic(hw, offset, data);
283 else 292 else
284 status = IXGBE_ERR_SWFW_SYNC; 293 status = IXGBE_ERR_SWFW_SYNC;
@@ -311,7 +320,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
311 (data << IXGBE_EEPROM_RW_REG_DATA) | 320 (data << IXGBE_EEPROM_RW_REG_DATA) |
312 IXGBE_EEPROM_RW_REG_START; 321 IXGBE_EEPROM_RW_REG_START;
313 322
314 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) { 323 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
315 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); 324 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
316 if (status != 0) { 325 if (status != 0) {
317 hw_dbg(hw, "Eeprom write EEWR timed out\n"); 326 hw_dbg(hw, "Eeprom write EEWR timed out\n");
@@ -676,7 +685,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
676 .set_vmdq = &ixgbe_set_vmdq_generic, 685 .set_vmdq = &ixgbe_set_vmdq_generic,
677 .clear_vmdq = &ixgbe_clear_vmdq_generic, 686 .clear_vmdq = &ixgbe_clear_vmdq_generic,
678 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 687 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
679 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
680 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 688 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
681 .enable_mc = &ixgbe_enable_mc_generic, 689 .enable_mc = &ixgbe_enable_mc_generic,
682 .disable_mc = &ixgbe_disable_mc_generic, 690 .disable_mc = &ixgbe_disable_mc_generic,
@@ -687,6 +695,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
687 .setup_sfp = NULL, 695 .setup_sfp = NULL,
688 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 696 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
689 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 697 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
698 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
699 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
690}; 700};
691 701
692static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 702static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
@@ -702,7 +712,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
702 .identify = &ixgbe_identify_phy_generic, 712 .identify = &ixgbe_identify_phy_generic,
703 .identify_sfp = &ixgbe_identify_sfp_module_generic, 713 .identify_sfp = &ixgbe_identify_sfp_module_generic,
704 .init = NULL, 714 .init = NULL,
705 .reset = &ixgbe_reset_phy_generic, 715 .reset = NULL,
706 .read_reg = &ixgbe_read_phy_reg_generic, 716 .read_reg = &ixgbe_read_phy_reg_generic,
707 .write_reg = &ixgbe_write_phy_reg_generic, 717 .write_reg = &ixgbe_write_phy_reg_generic,
708 .setup_link = &ixgbe_setup_phy_link_generic, 718 .setup_link = &ixgbe_setup_phy_link_generic,
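Editor's note: the X540 reset path above gains a mac_reset_top label and the IXGBE_FLAGS_DOUBLE_RESET_REQUIRED flag so that a second MAC reset can be issued after a 1 us stall. Below is a compilable user-space sketch of that control flow only; issue_mac_reset() and stall_1us() are stand-ins for the real register write and udelay(1), not driver functions.

#include <stdio.h>

#define FLAG_DOUBLE_RESET_REQUIRED 0x01	/* mirrors IXGBE_FLAGS_DOUBLE_RESET_REQUIRED */

struct fake_mac {
	unsigned char flags;
};

/* Stand-ins for the real MAC reset register write and udelay(1). */
static void issue_mac_reset(void) { puts("MAC reset issued"); }
static void stall_1us(void) { /* udelay(1) in the driver */ }

static void reset_hw_sketch(struct fake_mac *mac)
{
mac_reset_top:
	issue_mac_reset();

	/*
	 * Double resets recover from certain error states: clear the
	 * flag, stall so pending HW events can settle, then run the
	 * reset sequence once more.
	 */
	if (mac->flags & FLAG_DOUBLE_RESET_REQUIRED) {
		mac->flags &= ~FLAG_DOUBLE_RESET_REQUIRED;
		stall_1us();
		goto mac_reset_top;
	}
}

int main(void)
{
	struct fake_mac mac = { .flags = FLAG_DOUBLE_RESET_REQUIRED };

	reset_hw_sketch(&mac);	/* prints "MAC reset issued" twice */
	return 0;
}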
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index de643eb2ada..78abb6f1a86 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed;
65#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ 65#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
66#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 66#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
67#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ 67#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
68#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
69#define IXGBE_RXDCTL_RLPML_EN 0x00008000
68 70
69/* DCA Control */ 71/* DCA Control */
70#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 72#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 464e6c9d3fc..82768812552 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -51,7 +51,7 @@ char ixgbevf_driver_name[] = "ixgbevf";
51static const char ixgbevf_driver_string[] = 51static const char ixgbevf_driver_string[] =
52 "Intel(R) 82599 Virtual Function"; 52 "Intel(R) 82599 Virtual Function";
53 53
54#define DRV_VERSION "1.0.19-k0" 54#define DRV_VERSION "1.1.0-k0"
55const char ixgbevf_driver_version[] = DRV_VERSION; 55const char ixgbevf_driver_version[] = DRV_VERSION;
56static char ixgbevf_copyright[] = 56static char ixgbevf_copyright[] =
57 "Copyright (c) 2009 - 2010 Intel Corporation."; 57 "Copyright (c) 2009 - 2010 Intel Corporation.";
@@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
107} 107}
108 108
109/* 109/*
110 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors 110 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
111 * @adapter: pointer to adapter struct 111 * @adapter: pointer to adapter struct
112 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 112 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
113 * @queue: queue to map the corresponding interrupt to 113 * @queue: queue to map the corresponding interrupt to
@@ -178,8 +178,6 @@ static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
178 tx_ring->tx_buffer_info[eop].time_stamp && 178 tx_ring->tx_buffer_info[eop].time_stamp &&
179 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) { 179 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
180 /* detected Tx unit hang */ 180 /* detected Tx unit hang */
181 union ixgbe_adv_tx_desc *tx_desc;
182 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
183 printk(KERN_ERR "Detected Tx Unit Hang\n" 181 printk(KERN_ERR "Detected Tx Unit Hang\n"
184 " Tx Queue <%d>\n" 182 " Tx Queue <%d>\n"
185 " TDH, TDT <%x>, <%x>\n" 183 " TDH, TDT <%x>, <%x>\n"
@@ -334,7 +332,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
334 struct ixgbevf_adapter *adapter = q_vector->adapter; 332 struct ixgbevf_adapter *adapter = q_vector->adapter;
335 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 333 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
336 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 334 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
337 int ret;
338 335
339 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 336 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
340 if (adapter->vlgrp && is_vlan) 337 if (adapter->vlgrp && is_vlan)
@@ -345,9 +342,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
345 napi_gro_receive(&q_vector->napi, skb); 342 napi_gro_receive(&q_vector->napi, skb);
346 } else { 343 } else {
347 if (adapter->vlgrp && is_vlan) 344 if (adapter->vlgrp && is_vlan)
348 ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 345 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
349 else 346 else
350 ret = netif_rx(skb); 347 netif_rx(skb);
351 } 348 }
352} 349}
353 350
@@ -1017,7 +1014,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
1017} 1014}
1018 1015
1019/** 1016/**
1020 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) 1017 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
1021 * @irq: unused 1018 * @irq: unused
1022 * @data: pointer to our q_vector struct for this interrupt vector 1019 * @data: pointer to our q_vector struct for this interrupt vector
1023 **/ 1020 **/
@@ -1665,6 +1662,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1665 j = adapter->rx_ring[i].reg_idx; 1662 j = adapter->rx_ring[i].reg_idx;
1666 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1663 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1667 rxdctl |= IXGBE_RXDCTL_ENABLE; 1664 rxdctl |= IXGBE_RXDCTL_ENABLE;
1665 if (hw->mac.type == ixgbe_mac_X540_vf) {
1666 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1667 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1668 IXGBE_RXDCTL_RLPML_EN);
1669 }
1668 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1670 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1669 ixgbevf_rx_desc_queue_enable(adapter, i); 1671 ixgbevf_rx_desc_queue_enable(adapter, i);
1670 } 1672 }
@@ -1967,7 +1969,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1967} 1969}
1968 1970
1969/* 1971/*
1970 * ixgbe_set_num_queues: Allocate queues for device, feature dependent 1972 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
1971 * @adapter: board private structure to initialize 1973 * @adapter: board private structure to initialize
1972 * 1974 *
1973 * This is the top level queue allocation routine. The order here is very 1975 * This is the top level queue allocation routine. The order here is very
@@ -2216,7 +2218,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2216 2218
2217 hw->vendor_id = pdev->vendor; 2219 hw->vendor_id = pdev->vendor;
2218 hw->device_id = pdev->device; 2220 hw->device_id = pdev->device;
2219 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); 2221 hw->revision_id = pdev->revision;
2220 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2222 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2221 hw->subsystem_device_id = pdev->subsystem_device; 2223 hw->subsystem_device_id = pdev->subsystem_device;
2222 2224
@@ -3217,10 +3219,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3217static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3219static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3218{ 3220{
3219 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3221 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3222 struct ixgbe_hw *hw = &adapter->hw;
3220 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3223 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3224 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3225 u32 msg[2];
3226
3227 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3228 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3221 3229
3222 /* MTU < 68 is an error and causes problems on some kernels */ 3230 /* MTU < 68 is an error and causes problems on some kernels */
3223 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) 3231 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3224 return -EINVAL; 3232 return -EINVAL;
3225 3233
3226 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3234 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
@@ -3228,6 +3236,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3228 /* must set new MTU before calling down or up */ 3236 /* must set new MTU before calling down or up */
3229 netdev->mtu = new_mtu; 3237 netdev->mtu = new_mtu;
3230 3238
3239 msg[0] = IXGBE_VF_SET_LPE;
3240 msg[1] = max_frame;
3241 hw->mbx.ops.write_posted(hw, msg, 2);
3242
3231 if (netif_running(netdev)) 3243 if (netif_running(netdev))
3232 ixgbevf_reinit_locked(adapter); 3244 ixgbevf_reinit_locked(adapter);
3233 3245
@@ -3272,8 +3284,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3272 3284
3273static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3285static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3274{ 3286{
3275 struct ixgbevf_adapter *adapter;
3276 adapter = netdev_priv(dev);
3277 dev->netdev_ops = &ixgbe_netdev_ops; 3287 dev->netdev_ops = &ixgbe_netdev_ops;
3278 ixgbevf_set_ethtool_ops(dev); 3288 ixgbevf_set_ethtool_ops(dev);
3279 dev->watchdog_timeo = 5 * HZ; 3289 dev->watchdog_timeo = 5 * HZ;
@@ -3519,9 +3529,9 @@ static struct pci_driver ixgbevf_driver = {
3519}; 3529};
3520 3530
3521/** 3531/**
3522 * ixgbe_init_module - Driver Registration Routine 3532 * ixgbevf_init_module - Driver Registration Routine
3523 * 3533 *
3524 * ixgbe_init_module is the first routine called when the driver is 3534 * ixgbevf_init_module is the first routine called when the driver is
3525 * loaded. All it does is register with the PCI subsystem. 3535 * loaded. All it does is register with the PCI subsystem.
3526 **/ 3536 **/
3527static int __init ixgbevf_init_module(void) 3537static int __init ixgbevf_init_module(void)
@@ -3539,9 +3549,9 @@ static int __init ixgbevf_init_module(void)
3539module_init(ixgbevf_init_module); 3549module_init(ixgbevf_init_module);
3540 3550
3541/** 3551/**
3542 * ixgbe_exit_module - Driver Exit Cleanup Routine 3552 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3543 * 3553 *
3544 * ixgbe_exit_module is called just before the driver is removed 3554 * ixgbevf_exit_module is called just before the driver is removed
3545 * from memory. 3555 * from memory.
3546 **/ 3556 **/
3547static void __exit ixgbevf_exit_module(void) 3557static void __exit ixgbevf_exit_module(void)
@@ -3551,7 +3561,7 @@ static void __exit ixgbevf_exit_module(void)
3551 3561
3552#ifdef DEBUG 3562#ifdef DEBUG
3553/** 3563/**
3554 * ixgbe_get_hw_dev_name - return device name string 3564 * ixgbevf_get_hw_dev_name - return device name string
3555 * used by hardware layer to print debugging information 3565 * used by hardware layer to print debugging information
3556 **/ 3566 **/
3557char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3567char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
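Editor's note: for the X540 VF, ixgbevf_up_complete() above now programs the per-queue receive length limit into RXDCTL by clearing the RLPML field and writing mtu + ETH_HLEN + ETH_FCS_LEN together with the enable bit. The sketch below shows just that bit manipulation in plain C; the register value is faked here rather than read through IXGBE_READ_REG.

#include <stdint.h>
#include <stdio.h>

#define RXDCTL_RLPMLMASK 0x00003FFF	/* receive length field (X540 VF) */
#define RXDCTL_RLPML_EN  0x00008000	/* enable the per-queue length limit */
#define ETH_HLEN 14
#define ETH_FCS_LEN 4

/* Sketch of the per-queue RXDCTL update done in ixgbevf_up_complete(). */
static uint32_t set_rx_len_limit(uint32_t rxdctl, int mtu)
{
	rxdctl &= ~RXDCTL_RLPMLMASK;			/* drop the old limit */
	rxdctl |= (uint32_t)(mtu + ETH_HLEN + ETH_FCS_LEN)	/* max frame size */
		  | RXDCTL_RLPML_EN;			/* turn the limit on */
	return rxdctl;
}

int main(void)
{
	uint32_t rxdctl = 0x02000000;	/* pretend RXDCTL_ENABLE is already set */

	printf("RXDCTL = 0x%08x\n", set_rx_len_limit(rxdctl, 1500));
	return 0;
}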
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index e97ebef3cf4..f690474f440 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -161,6 +161,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
161} 161}
162 162
163static inline void 163static inline void
164jme_mac_rxclk_off(struct jme_adapter *jme)
165{
166 jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
167 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
168}
169
170static inline void
171jme_mac_rxclk_on(struct jme_adapter *jme)
172{
173 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
174 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
175}
176
177static inline void
178jme_mac_txclk_off(struct jme_adapter *jme)
179{
180 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
181 jwrite32f(jme, JME_GHC, jme->reg_ghc);
182}
183
184static inline void
185jme_mac_txclk_on(struct jme_adapter *jme)
186{
187 u32 speed = jme->reg_ghc & GHC_SPEED;
188 if (speed == GHC_SPEED_1000M)
189 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
190 else
191 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
192 jwrite32f(jme, JME_GHC, jme->reg_ghc);
193}
194
195static inline void
196jme_reset_ghc_speed(struct jme_adapter *jme)
197{
198 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
199 jwrite32f(jme, JME_GHC, jme->reg_ghc);
200}
201
202static inline void
203jme_reset_250A2_workaround(struct jme_adapter *jme)
204{
205 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
206 GPREG1_RSSPATCH);
207 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
208}
209
210static inline void
211jme_assert_ghc_reset(struct jme_adapter *jme)
212{
213 jme->reg_ghc |= GHC_SWRST;
214 jwrite32f(jme, JME_GHC, jme->reg_ghc);
215}
216
217static inline void
218jme_clear_ghc_reset(struct jme_adapter *jme)
219{
220 jme->reg_ghc &= ~GHC_SWRST;
221 jwrite32f(jme, JME_GHC, jme->reg_ghc);
222}
223
224static inline void
164jme_reset_mac_processor(struct jme_adapter *jme) 225jme_reset_mac_processor(struct jme_adapter *jme)
165{ 226{
166 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 227 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
168 u32 gpreg0; 229 u32 gpreg0;
169 int i; 230 int i;
170 231
171 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); 232 jme_reset_ghc_speed(jme);
172 udelay(2); 233 jme_reset_250A2_workaround(jme);
173 jwrite32(jme, JME_GHC, jme->reg_ghc); 234
235 jme_mac_rxclk_on(jme);
236 jme_mac_txclk_on(jme);
237 udelay(1);
238 jme_assert_ghc_reset(jme);
239 udelay(1);
240 jme_mac_rxclk_off(jme);
241 jme_mac_txclk_off(jme);
242 udelay(1);
243 jme_clear_ghc_reset(jme);
244 udelay(1);
245 jme_mac_rxclk_on(jme);
246 jme_mac_txclk_on(jme);
247 udelay(1);
248 jme_mac_rxclk_off(jme);
249 jme_mac_txclk_off(jme);
174 250
175 jwrite32(jme, JME_RXDBA_LO, 0x00000000); 251 jwrite32(jme, JME_RXDBA_LO, 0x00000000);
176 jwrite32(jme, JME_RXDBA_HI, 0x00000000); 252 jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
190 else 266 else
191 gpreg0 = GPREG0_DEFAULT; 267 gpreg0 = GPREG0_DEFAULT;
192 jwrite32(jme, JME_GPREG0, gpreg0); 268 jwrite32(jme, JME_GPREG0, gpreg0);
193 jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
194}
195
196static inline void
197jme_reset_ghc_speed(struct jme_adapter *jme)
198{
199 jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
200 jwrite32(jme, JME_GHC, jme->reg_ghc);
201} 269}
202 270
203static inline void 271static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
336} 404}
337 405
338static inline void 406static inline void
339jme_set_phyfifoa(struct jme_adapter *jme) 407jme_set_phyfifo_5level(struct jme_adapter *jme)
340{ 408{
341 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); 409 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
342} 410}
343 411
344static inline void 412static inline void
345jme_set_phyfifob(struct jme_adapter *jme) 413jme_set_phyfifo_8level(struct jme_adapter *jme)
346{ 414{
347 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); 415 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
348} 416}
@@ -351,7 +419,7 @@ static int
351jme_check_link(struct net_device *netdev, int testonly) 419jme_check_link(struct net_device *netdev, int testonly)
352{ 420{
353 struct jme_adapter *jme = netdev_priv(netdev); 421 struct jme_adapter *jme = netdev_priv(netdev);
354 u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1; 422 u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
355 char linkmsg[64]; 423 char linkmsg[64];
356 int rc = 0; 424 int rc = 0;
357 425
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
414 482
415 jme->phylink = phylink; 483 jme->phylink = phylink;
416 484
417 ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | 485 /*
418 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | 486 * The speed/duplex setting of jme->reg_ghc has already been cleared
419 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); 487 * by jme_reset_mac_processor()
488 */
420 switch (phylink & PHY_LINK_SPEED_MASK) { 489 switch (phylink & PHY_LINK_SPEED_MASK) {
421 case PHY_LINK_SPEED_10M: 490 case PHY_LINK_SPEED_10M:
422 ghc |= GHC_SPEED_10M | 491 jme->reg_ghc |= GHC_SPEED_10M;
423 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
424 strcat(linkmsg, "10 Mbps, "); 492 strcat(linkmsg, "10 Mbps, ");
425 break; 493 break;
426 case PHY_LINK_SPEED_100M: 494 case PHY_LINK_SPEED_100M:
427 ghc |= GHC_SPEED_100M | 495 jme->reg_ghc |= GHC_SPEED_100M;
428 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
429 strcat(linkmsg, "100 Mbps, "); 496 strcat(linkmsg, "100 Mbps, ");
430 break; 497 break;
431 case PHY_LINK_SPEED_1000M: 498 case PHY_LINK_SPEED_1000M:
432 ghc |= GHC_SPEED_1000M | 499 jme->reg_ghc |= GHC_SPEED_1000M;
433 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
434 strcat(linkmsg, "1000 Mbps, "); 500 strcat(linkmsg, "1000 Mbps, ");
435 break; 501 break;
436 default: 502 default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
439 505
440 if (phylink & PHY_LINK_DUPLEX) { 506 if (phylink & PHY_LINK_DUPLEX) {
441 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); 507 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
442 ghc |= GHC_DPX; 508 jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
509 jme->reg_ghc |= GHC_DPX;
443 } else { 510 } else {
444 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | 511 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
445 TXMCS_BACKOFF | 512 TXMCS_BACKOFF |
446 TXMCS_CARRIERSENSE | 513 TXMCS_CARRIERSENSE |
447 TXMCS_COLLISION); 514 TXMCS_COLLISION);
448 jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | 515 jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
449 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
450 TXTRHD_TXREN |
451 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
452 } 516 }
453 517
454 gpreg1 = GPREG1_DEFAULT; 518 jwrite32(jme, JME_GHC, jme->reg_ghc);
519
455 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 520 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
521 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
522 GPREG1_RSSPATCH);
456 if (!(phylink & PHY_LINK_DUPLEX)) 523 if (!(phylink & PHY_LINK_DUPLEX))
457 gpreg1 |= GPREG1_HALFMODEPATCH; 524 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
458 switch (phylink & PHY_LINK_SPEED_MASK) { 525 switch (phylink & PHY_LINK_SPEED_MASK) {
459 case PHY_LINK_SPEED_10M: 526 case PHY_LINK_SPEED_10M:
460 jme_set_phyfifoa(jme); 527 jme_set_phyfifo_8level(jme);
461 gpreg1 |= GPREG1_RSSPATCH; 528 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
462 break; 529 break;
463 case PHY_LINK_SPEED_100M: 530 case PHY_LINK_SPEED_100M:
464 jme_set_phyfifob(jme); 531 jme_set_phyfifo_5level(jme);
465 gpreg1 |= GPREG1_RSSPATCH; 532 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
466 break; 533 break;
467 case PHY_LINK_SPEED_1000M: 534 case PHY_LINK_SPEED_1000M:
468 jme_set_phyfifoa(jme); 535 jme_set_phyfifo_8level(jme);
469 break; 536 break;
470 default: 537 default:
471 break; 538 break;
472 } 539 }
473 } 540 }
474 541 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
475 jwrite32(jme, JME_GPREG1, gpreg1);
476 jwrite32(jme, JME_GHC, ghc);
477 jme->reg_ghc = ghc;
478 542
479 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 543 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
480 "Full-Duplex, " : 544 "Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
613 * Enable TX Engine 677 * Enable TX Engine
614 */ 678 */
615 wmb(); 679 wmb();
616 jwrite32(jme, JME_TXCS, jme->reg_txcs | 680 jwrite32f(jme, JME_TXCS, jme->reg_txcs |
617 TXCS_SELECT_QUEUE0 | 681 TXCS_SELECT_QUEUE0 |
618 TXCS_ENABLE); 682 TXCS_ENABLE);
619 683
684 /*
685 * Start clock for TX MAC Processor
686 */
687 jme_mac_txclk_on(jme);
620} 688}
621 689
622static inline void 690static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
651 719
652 if (!i) 720 if (!i)
653 pr_err("Disable TX engine timeout\n"); 721 pr_err("Disable TX engine timeout\n");
722
723 /*
724 * Stop clock for TX MAC Processor
725 */
726 jme_mac_txclk_off(jme);
654} 727}
655 728
656static void 729static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
825 /* 898 /*
826 * Setup Unicast Filter 899 * Setup Unicast Filter
827 */ 900 */
901 jme_set_unicastaddr(jme->dev);
828 jme_set_multi(jme->dev); 902 jme_set_multi(jme->dev);
829 903
830 /* 904 /*
831 * Enable RX Engine 905 * Enable RX Engine
832 */ 906 */
833 wmb(); 907 wmb();
834 jwrite32(jme, JME_RXCS, jme->reg_rxcs | 908 jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
835 RXCS_QUEUESEL_Q0 | 909 RXCS_QUEUESEL_Q0 |
836 RXCS_ENABLE | 910 RXCS_ENABLE |
837 RXCS_QST); 911 RXCS_QST);
912
913 /*
914 * Start clock for RX MAC Processor
915 */
916 jme_mac_rxclk_on(jme);
838} 917}
839 918
840static inline void 919static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
871 if (!i) 950 if (!i)
872 pr_err("Disable RX engine timeout\n"); 951 pr_err("Disable RX engine timeout\n");
873 952
953 /*
954 * Stop clock for RX MAC Processor
955 */
956 jme_mac_rxclk_off(jme);
957}
958
959static u16
960jme_udpsum(struct sk_buff *skb)
961{
962 u16 csum = 0xFFFFu;
963
964 if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
965 return csum;
966 if (skb->protocol != htons(ETH_P_IP))
967 return csum;
968 skb_set_network_header(skb, ETH_HLEN);
969 if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
970 (skb->len < (ETH_HLEN +
971 (ip_hdr(skb)->ihl << 2) +
972 sizeof(struct udphdr)))) {
973 skb_reset_network_header(skb);
974 return csum;
975 }
976 skb_set_transport_header(skb,
977 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
978 csum = udp_hdr(skb)->check;
979 skb_reset_transport_header(skb);
980 skb_reset_network_header(skb);
981
982 return csum;
874} 983}
875 984
876static int 985static int
877jme_rxsum_ok(struct jme_adapter *jme, u16 flags) 986jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
878{ 987{
879 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) 988 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
880 return false; 989 return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
887 } 996 }
888 997
889 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 998 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
890 == RXWBFLAG_UDPON)) { 999 == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
891 if (flags & RXWBFLAG_IPV4) 1000 if (flags & RXWBFLAG_IPV4)
892 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); 1001 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
893 return false; 1002 return false;
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
935 skb_put(skb, framesize); 1044 skb_put(skb, framesize);
936 skb->protocol = eth_type_trans(skb, jme->dev); 1045 skb->protocol = eth_type_trans(skb, jme->dev);
937 1046
938 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 1047 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
939 skb->ip_summed = CHECKSUM_UNNECESSARY; 1048 skb->ip_summed = CHECKSUM_UNNECESSARY;
940 else 1049 else
941 skb_checksum_none_assert(skb); 1050 skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
1207 tasklet_disable(&jme->rxempty_task); 1316 tasklet_disable(&jme->rxempty_task);
1208 1317
1209 if (netif_carrier_ok(netdev)) { 1318 if (netif_carrier_ok(netdev)) {
1210 jme_reset_ghc_speed(jme);
1211 jme_disable_rx_engine(jme); 1319 jme_disable_rx_engine(jme);
1212 jme_disable_tx_engine(jme); 1320 jme_disable_tx_engine(jme);
1213 jme_reset_mac_processor(jme); 1321 jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@ jme_free_irq(struct jme_adapter *jme)
1577} 1685}
1578 1686
1579static inline void 1687static inline void
1688jme_new_phy_on(struct jme_adapter *jme)
1689{
1690 u32 reg;
1691
1692 reg = jread32(jme, JME_PHY_PWR);
1693 reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1694 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1695 jwrite32(jme, JME_PHY_PWR, reg);
1696
1697 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1698 reg &= ~PE1_GPREG0_PBG;
1699 reg |= PE1_GPREG0_ENBG;
1700 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1701}
1702
1703static inline void
1704jme_new_phy_off(struct jme_adapter *jme)
1705{
1706 u32 reg;
1707
1708 reg = jread32(jme, JME_PHY_PWR);
1709 reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1710 PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1711 jwrite32(jme, JME_PHY_PWR, reg);
1712
1713 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1714 reg &= ~PE1_GPREG0_PBG;
1715 reg |= PE1_GPREG0_PDD3COLD;
1716 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1717}
1718
1719static inline void
1580jme_phy_on(struct jme_adapter *jme) 1720jme_phy_on(struct jme_adapter *jme)
1581{ 1721{
1582 u32 bmcr; 1722 u32 bmcr;
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
1584 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1724 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1585 bmcr &= ~BMCR_PDOWN; 1725 bmcr &= ~BMCR_PDOWN;
1586 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); 1726 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1727
1728 if (new_phy_power_ctrl(jme->chip_main_rev))
1729 jme_new_phy_on(jme);
1730}
1731
1732static inline void
1733jme_phy_off(struct jme_adapter *jme)
1734{
1735 u32 bmcr;
1736
1737 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1738 bmcr |= BMCR_PDOWN;
1739 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1740
1741 if (new_phy_power_ctrl(jme->chip_main_rev))
1742 jme_new_phy_off(jme);
1587} 1743}
1588 1744
1589static int 1745static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
1606 1762
1607 jme_start_irq(jme); 1763 jme_start_irq(jme);
1608 1764
1609 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 1765 jme_phy_on(jme);
1610 jme_phy_on(jme); 1766 if (test_bit(JME_FLAG_SSET, &jme->flags))
1611 jme_set_settings(netdev, &jme->old_ecmd); 1767 jme_set_settings(netdev, &jme->old_ecmd);
1612 } else { 1768 else
1613 jme_reset_phy_processor(jme); 1769 jme_reset_phy_processor(jme);
1614 }
1615 1770
1616 jme_reset_link(jme); 1771 jme_reset_link(jme);
1617 1772
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
1657 } 1812 }
1658} 1813}
1659 1814
1660static inline void
1661jme_phy_off(struct jme_adapter *jme)
1662{
1663 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
1664}
1665
1666static void 1815static void
1667jme_powersave_phy(struct jme_adapter *jme) 1816jme_powersave_phy(struct jme_adapter *jme)
1668{ 1817{
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
1696 tasklet_disable(&jme->rxclean_task); 1845 tasklet_disable(&jme->rxclean_task);
1697 tasklet_disable(&jme->rxempty_task); 1846 tasklet_disable(&jme->rxempty_task);
1698 1847
1699 jme_reset_ghc_speed(jme);
1700 jme_disable_rx_engine(jme); 1848 jme_disable_rx_engine(jme);
1701 jme_disable_tx_engine(jme); 1849 jme_disable_tx_engine(jme);
1702 jme_reset_mac_processor(jme); 1850 jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1993 return NETDEV_TX_OK; 2141 return NETDEV_TX_OK;
1994} 2142}
1995 2143
2144static void
2145jme_set_unicastaddr(struct net_device *netdev)
2146{
2147 struct jme_adapter *jme = netdev_priv(netdev);
2148 u32 val;
2149
2150 val = (netdev->dev_addr[3] & 0xff) << 24 |
2151 (netdev->dev_addr[2] & 0xff) << 16 |
2152 (netdev->dev_addr[1] & 0xff) << 8 |
2153 (netdev->dev_addr[0] & 0xff);
2154 jwrite32(jme, JME_RXUMA_LO, val);
2155 val = (netdev->dev_addr[5] & 0xff) << 8 |
2156 (netdev->dev_addr[4] & 0xff);
2157 jwrite32(jme, JME_RXUMA_HI, val);
2158}
2159
1996static int 2160static int
1997jme_set_macaddr(struct net_device *netdev, void *p) 2161jme_set_macaddr(struct net_device *netdev, void *p)
1998{ 2162{
1999 struct jme_adapter *jme = netdev_priv(netdev); 2163 struct jme_adapter *jme = netdev_priv(netdev);
2000 struct sockaddr *addr = p; 2164 struct sockaddr *addr = p;
2001 u32 val;
2002 2165
2003 if (netif_running(netdev)) 2166 if (netif_running(netdev))
2004 return -EBUSY; 2167 return -EBUSY;
2005 2168
2006 spin_lock_bh(&jme->macaddr_lock); 2169 spin_lock_bh(&jme->macaddr_lock);
2007 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2170 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2008 2171 jme_set_unicastaddr(netdev);
2009 val = (addr->sa_data[3] & 0xff) << 24 |
2010 (addr->sa_data[2] & 0xff) << 16 |
2011 (addr->sa_data[1] & 0xff) << 8 |
2012 (addr->sa_data[0] & 0xff);
2013 jwrite32(jme, JME_RXUMA_LO, val);
2014 val = (addr->sa_data[5] & 0xff) << 8 |
2015 (addr->sa_data[4] & 0xff);
2016 jwrite32(jme, JME_RXUMA_HI, val);
2017 spin_unlock_bh(&jme->macaddr_lock); 2172 spin_unlock_bh(&jme->macaddr_lock);
2018 2173
2019 return 0; 2174 return 0;
@@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
2731 2886
2732 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; 2887 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2733 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; 2888 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2889 jme->chip_main_rev = jme->chiprev & 0xF;
2890 jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2734} 2891}
2735 2892
2736static const struct net_device_ops jme_netdev_ops = { 2893static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
2880 jme->reg_rxmcs = RXMCS_DEFAULT; 3037 jme->reg_rxmcs = RXMCS_DEFAULT;
2881 jme->reg_txpfc = 0; 3038 jme->reg_txpfc = 0;
2882 jme->reg_pmcs = PMCS_MFEN; 3039 jme->reg_pmcs = PMCS_MFEN;
3040 jme->reg_gpreg1 = GPREG1_DEFAULT;
2883 set_bit(JME_FLAG_TXCSUM, &jme->flags); 3041 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2884 set_bit(JME_FLAG_TSO, &jme->flags); 3042 set_bit(JME_FLAG_TSO, &jme->flags);
2885 3043
@@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
2936 jme->mii_if.mdio_write = jme_mdio_write; 3094 jme->mii_if.mdio_write = jme_mdio_write;
2937 3095
2938 jme_clear_pm(jme); 3096 jme_clear_pm(jme);
2939 jme_set_phyfifoa(jme); 3097 jme_set_phyfifo_5level(jme);
2940 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev); 3098 jme->pcirev = pdev->revision;
2941 if (!jme->fpgaver) 3099 if (!jme->fpgaver)
2942 jme_phy_init(jme); 3100 jme_phy_init(jme);
2943 jme_phy_off(jme); 3101 jme_phy_off(jme);
@@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
2964 goto err_out_unmap; 3122 goto err_out_unmap;
2965 } 3123 }
2966 3124
2967 netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n", 3125 netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
2968 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? 3126 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
2969 "JMC250 Gigabit Ethernet" : 3127 "JMC250 Gigabit Ethernet" :
2970 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? 3128 (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
2971 "JMC260 Fast Ethernet" : "Unknown", 3129 "JMC260 Fast Ethernet" : "Unknown",
2972 (jme->fpgaver != 0) ? " (FPGA)" : "", 3130 (jme->fpgaver != 0) ? " (FPGA)" : "",
2973 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, 3131 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2974 jme->rev, netdev->dev_addr); 3132 jme->pcirev, netdev->dev_addr);
2975 3133
2976 return 0; 3134 return 0;
2977 3135
@@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
3035 jme_polling_mode(jme); 3193 jme_polling_mode(jme);
3036 3194
3037 jme_stop_pcc_timer(jme); 3195 jme_stop_pcc_timer(jme);
3038 jme_reset_ghc_speed(jme);
3039 jme_disable_rx_engine(jme); 3196 jme_disable_rx_engine(jme);
3040 jme_disable_tx_engine(jme); 3197 jme_disable_tx_engine(jme);
3041 jme_reset_mac_processor(jme); 3198 jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
3066 jme_clear_pm(jme); 3223 jme_clear_pm(jme);
3067 pci_restore_state(pdev); 3224 pci_restore_state(pdev);
3068 3225
3069 if (test_bit(JME_FLAG_SSET, &jme->flags)) { 3226 jme_phy_on(jme);
3070 jme_phy_on(jme); 3227 if (test_bit(JME_FLAG_SSET, &jme->flags))
3071 jme_set_settings(netdev, &jme->old_ecmd); 3228 jme_set_settings(netdev, &jme->old_ecmd);
3072 } else { 3229 else
3073 jme_reset_phy_processor(jme); 3230 jme_reset_phy_processor(jme);
3074 }
3075 3231
3076 jme_start_irq(jme); 3232 jme_start_irq(jme);
3077 netif_device_attach(netdev); 3233 netif_device_attach(netdev);
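Editor's note: jme_set_unicastaddr(), added above, packs the six MAC address bytes into the RXUMA_LO/RXUMA_HI registers. The following self-contained sketch reproduces the same byte packing; printf stands in for jwrite32() and the sample address is made up.

#include <stdint.h>
#include <stdio.h>

/* Same packing as jme_set_unicastaddr(): bytes 0-3 go into RXUMA_LO
 * (lowest byte first), bytes 4-5 into RXUMA_HI. */
static void pack_unicast_addr(const uint8_t addr[6])
{
	uint32_t lo = (uint32_t)addr[3] << 24 |
		      (uint32_t)addr[2] << 16 |
		      (uint32_t)addr[1] << 8  |
		      (uint32_t)addr[0];
	uint32_t hi = (uint32_t)addr[5] << 8 |
		      (uint32_t)addr[4];

	printf("RXUMA_LO = 0x%08x, RXUMA_HI = 0x%08x\n", lo, hi);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

	pack_unicast_addr(mac);	/* RXUMA_LO = 0xab211b00, RXUMA_HI = 0x0000efcd */
	return 0;
}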
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac09264bf2..8bf30451e82 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
26#define __JME_H_INCLUDED__ 26#define __JME_H_INCLUDED__
27 27
28#define DRV_NAME "jme" 28#define DRV_NAME "jme"
29#define DRV_VERSION "1.0.7" 29#define DRV_VERSION "1.0.8"
30#define PFX DRV_NAME ": " 30#define PFX DRV_NAME ": "
31 31
32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
103#define HALF_US 500 /* 500 ns */ 103#define HALF_US 500 /* 500 ns */
104#define JMESPIIOCTL SIOCDEVPRIVATE 104#define JMESPIIOCTL SIOCDEVPRIVATE
105 105
106#define PCI_PRIV_PE1 0xE4
107
108enum pci_priv_pe1_bit_masks {
109 PE1_ASPMSUPRT = 0x00000003, /*
110 * RW:
111 * Aspm_support[1:0]
112 * (R/W Port of 5C[11:10])
113 */
114 PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
115 PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
116 PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
117 PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
118 PE1_GPREG0 = 0x0000FF00, /*
119 * SRW:
120 * Cfg_gp_reg0
121 * [7:6] phy_giga BG control
122 * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
123 * [4:0] Reserved
124 */
125 PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
126 PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
127 PE1_REVID = 0xFF000000, /* RO: Rev ID */
128};
129
130enum pci_priv_pe1_values {
131 PE1_GPREG0_ENBG = 0x00000000, /* en BG */
132 PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
133 PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
134 PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
135};
136
106/* 137/*
107 * Dynamic(adaptive)/Static PCC values 138 * Dynamic(adaptive)/Static PCC values
108 */ 139 */
@@ -403,6 +434,7 @@ struct jme_adapter {
403 u32 reg_rxmcs; 434 u32 reg_rxmcs;
404 u32 reg_ghc; 435 u32 reg_ghc;
405 u32 reg_pmcs; 436 u32 reg_pmcs;
437 u32 reg_gpreg1;
406 u32 phylink; 438 u32 phylink;
407 u32 tx_ring_size; 439 u32 tx_ring_size;
408 u32 tx_ring_mask; 440 u32 tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
411 u32 rx_ring_mask; 443 u32 rx_ring_mask;
412 u8 mrrs; 444 u8 mrrs;
413 unsigned int fpgaver; 445 unsigned int fpgaver;
414 unsigned int chiprev; 446 u8 chiprev;
415 u8 rev; 447 u8 chip_main_rev;
448 u8 chip_sub_rev;
449 u8 pcirev;
416 u32 msg_enable; 450 u32 msg_enable;
417 struct ethtool_cmd old_ecmd; 451 struct ethtool_cmd old_ecmd;
418 unsigned int old_mtu; 452 unsigned int old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
497 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ 531 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
498 532
499 533
534 JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
500 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ 535 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
501 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ 536 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
502 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ 537 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
624 TXTRHD_TXRL_SHIFT = 0, 659 TXTRHD_TXRL_SHIFT = 0,
625}; 660};
626 661
662enum jme_txtrhd_values {
663 TXTRHD_FULLDUPLEX = 0x00000000,
664 TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
665 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
666 TXTRHD_TXREN |
667 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
668};
669
627/* 670/*
628 * RX Control/Status Bits 671 * RX Control/Status Bits
629 */ 672 */
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
779 */ 822 */
780enum jme_ghc_bit_mask { 823enum jme_ghc_bit_mask {
781 GHC_SWRST = 0x40000000, 824 GHC_SWRST = 0x40000000,
825 GHC_TO_CLK_SRC = 0x00C00000,
826 GHC_TXMAC_CLK_SRC = 0x00300000,
782 GHC_DPX = 0x00000040, 827 GHC_DPX = 0x00000040,
783 GHC_SPEED = 0x00000030, 828 GHC_SPEED = 0x00000030,
784 GHC_LINK_POLL = 0x00000001, 829 GHC_LINK_POLL = 0x00000001,
@@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks {
833}; 878};
834 879
835/* 880/*
881 * New PHY Power Control Register
882 */
883enum jme_phy_pwr_bit_masks {
884 PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
885 PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
886 PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
887 PHY_PWR_CLKSEL = 0x08000000, /*
888 * XTL_OUT Clock select
889 * (an internal free-running clock)
890 * 0: xtl_out = phy_giga.A_XTL25_O
891 * 1: xtl_out = phy_giga.PD_OSC
892 */
893};
894
895/*
836 * Giga PHY Status Registers 896 * Giga PHY Status Registers
837 */ 897 */
838enum jme_phy_link_bit_mask { 898enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
942 1002
943/* 1003/*
944 * General Purpose REG-1 1004 * General Purpose REG-1
945 * Note: All theses bits defined here are for
946 * Chip mode revision 0x11 only
947 */ 1005 */
948enum jme_gpreg1_masks { 1006enum jme_gpreg1_bit_masks {
1007 GPREG1_RXCLKOFF = 0x04000000,
1008 GPREG1_PCREQN = 0x00020000,
1009 GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
1010 GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
949 GPREG1_INTRDELAYUNIT = 0x00000018, 1011 GPREG1_INTRDELAYUNIT = 0x00000018,
950 GPREG1_INTRDELAYENABLE = 0x00000007, 1012 GPREG1_INTRDELAYENABLE = 0x00000007,
951}; 1013};
952 1014
953enum jme_gpreg1_vals { 1015enum jme_gpreg1_vals {
954 GPREG1_RSSPATCH = 0x00000040,
955 GPREG1_HALFMODEPATCH = 0x00000020,
956
957 GPREG1_INTDLYUNIT_16NS = 0x00000000, 1016 GPREG1_INTDLYUNIT_16NS = 0x00000000,
958 GPREG1_INTDLYUNIT_256NS = 0x00000008, 1017 GPREG1_INTDLYUNIT_256NS = 0x00000008,
959 GPREG1_INTDLYUNIT_1US = 0x00000010, 1018 GPREG1_INTDLYUNIT_1US = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
967 GPREG1_INTDLYEN_6U = 0x00000006, 1026 GPREG1_INTDLYEN_6U = 0x00000006,
968 GPREG1_INTDLYEN_7U = 0x00000007, 1027 GPREG1_INTDLYEN_7U = 0x00000007,
969 1028
970 GPREG1_DEFAULT = 0x00000000, 1029 GPREG1_DEFAULT = GPREG1_PCREQN,
971}; 1030};
972 1031
973/* 1032/*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
1184/* 1243/*
1185 * Workaround 1244 * Workaround
1186 */ 1245 */
1187static inline int is_buggy250(unsigned short device, unsigned int chiprev) 1246static inline int is_buggy250(unsigned short device, u8 chiprev)
1188{ 1247{
1189 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; 1248 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
1190} 1249}
1191 1250
1251static inline int new_phy_power_ctrl(u8 chip_main_rev)
1252{
1253 return chip_main_rev >= 5;
1254}
1255
1192/* 1256/*
1193 * Function prototypes 1257 * Function prototypes
1194 */ 1258 */
1195static int jme_set_settings(struct net_device *netdev, 1259static int jme_set_settings(struct net_device *netdev,
1196 struct ethtool_cmd *ecmd); 1260 struct ethtool_cmd *ecmd);
1261static void jme_set_unicastaddr(struct net_device *netdev);
1197static void jme_set_multi(struct net_device *netdev); 1262static void jme_set_multi(struct net_device *netdev);
1198 1263
1199#endif 1264#endif
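Editor's note: the new chip_main_rev/chip_sub_rev fields are derived from the low and high nibbles of chiprev (see jme_check_hw_ver() earlier in the patch), and new_phy_power_ctrl() enables the new PHY power path for main revisions of 5 or later. A quick sketch of that decoding follows; the chiprev value used here is invented for illustration.

#include <stdio.h>

/* Mirrors new_phy_power_ctrl() from the jme.h hunk above. */
static int new_phy_power_ctrl(unsigned char chip_main_rev)
{
	return chip_main_rev >= 5;
}

int main(void)
{
	unsigned char chiprev = 0x25;			/* hypothetical value */
	unsigned char chip_main_rev = chiprev & 0xF;	/* low nibble  -> 5 */
	unsigned char chip_sub_rev = (chiprev >> 4) & 0xF; /* high nibble -> 2 */

	printf("main rev %d, sub rev %d, new PHY power ctrl: %s\n",
	       chip_main_rev, chip_sub_rev,
	       new_phy_power_ctrl(chip_main_rev) ? "yes" : "no");
	return 0;
}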
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a1c54..ea0dc451da9 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
129 129
130static const struct ethtool_ops loopback_ethtool_ops = { 130static const struct ethtool_ops loopback_ethtool_ops = {
131 .get_link = always_on, 131 .get_link = always_on,
132 .set_tso = ethtool_op_set_tso,
133 .get_tx_csum = always_on,
134 .get_sg = always_on,
135 .get_rx_csum = always_on,
136}; 132};
137 133
138static int loopback_dev_init(struct net_device *dev) 134static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
169 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 165 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
170 dev->flags = IFF_LOOPBACK; 166 dev->flags = IFF_LOOPBACK;
171 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 167 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
168 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
172 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 169 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
173 | NETIF_F_TSO 170 | NETIF_F_ALL_TSO
171 | NETIF_F_UFO
174 | NETIF_F_NO_CSUM 172 | NETIF_F_NO_CSUM
173 | NETIF_F_RXCSUM
175 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
176 | NETIF_F_LLTX 175 | NETIF_F_LLTX
177 | NETIF_F_NETNS_LOCAL; 176 | NETIF_F_NETNS_LOCAL;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621ac3f..2300e459952 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
39 struct socket sock; 39 struct socket sock;
40 struct socket_wq wq; 40 struct socket_wq wq;
41 int vnet_hdr_sz; 41 int vnet_hdr_sz;
42 struct macvlan_dev *vlan; 42 struct macvlan_dev __rcu *vlan;
43 struct file *file; 43 struct file *file;
44 unsigned int flags; 44 unsigned int flags;
45}; 45};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
141 struct macvlan_dev *vlan; 141 struct macvlan_dev *vlan;
142 142
143 spin_lock(&macvtap_lock); 143 spin_lock(&macvtap_lock);
144 vlan = rcu_dereference(q->vlan); 144 vlan = rcu_dereference_protected(q->vlan,
145 lockdep_is_held(&macvtap_lock));
145 if (vlan) { 146 if (vlan) {
146 int index = get_slot(vlan, q); 147 int index = get_slot(vlan, q);
147 148
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
219 /* macvtap_put_queue can free some slots, so go through all slots */ 220 /* macvtap_put_queue can free some slots, so go through all slots */
220 spin_lock(&macvtap_lock); 221 spin_lock(&macvtap_lock);
221 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) { 222 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
222 q = rcu_dereference(vlan->taps[i]); 223 q = rcu_dereference_protected(vlan->taps[i],
224 lockdep_is_held(&macvtap_lock));
223 if (q) { 225 if (q) {
224 qlist[j++] = q; 226 qlist[j++] = q;
225 rcu_assign_pointer(vlan->taps[i], NULL); 227 rcu_assign_pointer(vlan->taps[i], NULL);
@@ -569,7 +571,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
569 } 571 }
570 572
571 rcu_read_lock_bh(); 573 rcu_read_lock_bh();
572 vlan = rcu_dereference(q->vlan); 574 vlan = rcu_dereference_bh(q->vlan);
573 if (vlan) 575 if (vlan)
574 macvlan_start_xmit(skb, vlan->dev); 576 macvlan_start_xmit(skb, vlan->dev);
575 else 577 else
@@ -583,7 +585,7 @@ err_kfree:
583 585
584err: 586err:
585 rcu_read_lock_bh(); 587 rcu_read_lock_bh();
586 vlan = rcu_dereference(q->vlan); 588 vlan = rcu_dereference_bh(q->vlan);
587 if (vlan) 589 if (vlan)
588 vlan->dev->stats.tx_dropped++; 590 vlan->dev->stats.tx_dropped++;
589 rcu_read_unlock_bh(); 591 rcu_read_unlock_bh();
@@ -631,7 +633,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
631 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); 633 ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
632 634
633 rcu_read_lock_bh(); 635 rcu_read_lock_bh();
634 vlan = rcu_dereference(q->vlan); 636 vlan = rcu_dereference_bh(q->vlan);
635 if (vlan) 637 if (vlan)
636 macvlan_count_rx(vlan, len, ret == 0, 0); 638 macvlan_count_rx(vlan, len, ret == 0, 0);
637 rcu_read_unlock_bh(); 639 rcu_read_unlock_bh();
@@ -727,7 +729,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
727 729
728 case TUNGETIFF: 730 case TUNGETIFF:
729 rcu_read_lock_bh(); 731 rcu_read_lock_bh();
730 vlan = rcu_dereference(q->vlan); 732 vlan = rcu_dereference_bh(q->vlan);
731 if (vlan) 733 if (vlan)
732 dev_hold(vlan->dev); 734 dev_hold(vlan->dev);
733 rcu_read_unlock_bh(); 735 rcu_read_unlock_bh();
@@ -736,7 +738,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
736 return -ENOLINK; 738 return -ENOLINK;
737 739
738 ret = 0; 740 ret = 0;
739 if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) || 741 if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
740 put_user(q->flags, &ifr->ifr_flags)) 742 put_user(q->flags, &ifr->ifr_flags))
741 ret = -EFAULT; 743 ret = -EFAULT;
742 dev_put(vlan->dev); 744 dev_put(vlan->dev);
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 210b2b164b3..0a6c6a2e755 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii,
354 if (!new_carrier) { 354 if (!new_carrier) {
355 netif_carrier_off(mii->dev); 355 netif_carrier_off(mii->dev);
356 if (ok_to_print) 356 if (ok_to_print)
357 printk(KERN_INFO "%s: link down\n", mii->dev->name); 357 netdev_info(mii->dev, "link down\n");
358 return 0; /* duplex did not change */ 358 return 0; /* duplex did not change */
359 } 359 }
360 360
@@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii,
381 duplex = 1; 381 duplex = 1;
382 382
383 if (ok_to_print) 383 if (ok_to_print)
384 printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", 384 netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n",
385 mii->dev->name, 385 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
386 lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" : 386 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ?
387 media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", 387 100 : 10,
388 duplex ? "full" : "half", 388 duplex ? "full" : "half",
389 lpa); 389 lpa);
390 390
391 if ((init_media) || (mii->full_duplex != duplex)) { 391 if ((init_media) || (mii->full_duplex != duplex)) {
392 mii->full_duplex = duplex; 392 mii->full_duplex = duplex;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 02076e16542..34425b94452 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -35,6 +35,8 @@
35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
36 */ 36 */
37 37
38#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
38#include <linux/init.h> 40#include <linux/init.h>
39#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
40#include <linux/in.h> 42#include <linux/in.h>
@@ -627,9 +629,8 @@ err:
627 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 629 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
628 (RX_FIRST_DESC | RX_LAST_DESC)) { 630 (RX_FIRST_DESC | RX_LAST_DESC)) {
629 if (net_ratelimit()) 631 if (net_ratelimit())
630 dev_printk(KERN_ERR, &mp->dev->dev, 632 netdev_err(mp->dev,
631 "received packet spanning " 633 "received packet spanning multiple descriptors\n");
632 "multiple descriptors\n");
633 } 634 }
634 635
635 if (cmd_sts & ERROR_SUMMARY) 636 if (cmd_sts & ERROR_SUMMARY)
@@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
868 869
869 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 870 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
870 txq->tx_dropped++; 871 txq->tx_dropped++;
871 dev_printk(KERN_DEBUG, &dev->dev, 872 netdev_printk(KERN_DEBUG, dev,
872 "failed to linearize skb with tiny " 873 "failed to linearize skb with tiny unaligned fragment\n");
873 "unaligned fragment\n");
874 return NETDEV_TX_BUSY; 874 return NETDEV_TX_BUSY;
875 } 875 }
876 876
877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 877 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
878 if (net_ratelimit()) 878 if (net_ratelimit())
879 dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); 879 netdev_err(dev, "tx queue full?!\n");
880 kfree_skb(skb); 880 kfree_skb(skb);
881 return NETDEV_TX_OK; 881 return NETDEV_TX_OK;
882 } 882 }
@@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
959 skb = __skb_dequeue(&txq->tx_skb); 959 skb = __skb_dequeue(&txq->tx_skb);
960 960
961 if (cmd_sts & ERROR_SUMMARY) { 961 if (cmd_sts & ERROR_SUMMARY) {
962 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 962 netdev_info(mp->dev, "tx error\n");
963 mp->dev->stats.tx_errors++; 963 mp->dev->stats.tx_errors++;
964 } 964 }
965 965
@@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1122 int ret; 1122 int ret;
1123 1123
1124 if (smi_wait_ready(msp)) { 1124 if (smi_wait_ready(msp)) {
1125 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1125 pr_warn("SMI bus busy timeout\n");
1126 return -ETIMEDOUT; 1126 return -ETIMEDOUT;
1127 } 1127 }
1128 1128
1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1129 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1130 1130
1131 if (smi_wait_ready(msp)) { 1131 if (smi_wait_ready(msp)) {
1132 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1132 pr_warn("SMI bus busy timeout\n");
1133 return -ETIMEDOUT; 1133 return -ETIMEDOUT;
1134 } 1134 }
1135 1135
1136 ret = readl(smi_reg); 1136 ret = readl(smi_reg);
1137 if (!(ret & SMI_READ_VALID)) { 1137 if (!(ret & SMI_READ_VALID)) {
1138 printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n"); 1138 pr_warn("SMI bus read not valid\n");
1139 return -ENODEV; 1139 return -ENODEV;
1140 } 1140 }
1141 1141
@@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1148 void __iomem *smi_reg = msp->base + SMI_REG; 1148 void __iomem *smi_reg = msp->base + SMI_REG;
1149 1149
1150 if (smi_wait_ready(msp)) { 1150 if (smi_wait_ready(msp)) {
1151 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1151 pr_warn("SMI bus busy timeout\n");
1152 return -ETIMEDOUT; 1152 return -ETIMEDOUT;
1153 } 1153 }
1154 1154
@@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1156 (addr << 16) | (val & 0xffff), smi_reg); 1156 (addr << 16) | (val & 0xffff), smi_reg);
1157 1157
1158 if (smi_wait_ready(msp)) { 1158 if (smi_wait_ready(msp)) {
1159 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); 1159 pr_warn("SMI bus busy timeout\n");
1160 return -ETIMEDOUT; 1160 return -ETIMEDOUT;
1161 } 1161 }
1162 1162
@@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
1566 if (netif_running(dev)) { 1566 if (netif_running(dev)) {
1567 mv643xx_eth_stop(dev); 1567 mv643xx_eth_stop(dev);
1568 if (mv643xx_eth_open(dev)) { 1568 if (mv643xx_eth_open(dev)) {
1569 dev_printk(KERN_ERR, &dev->dev, 1569 netdev_err(dev,
1570 "fatal error on re-opening device after " 1570 "fatal error on re-opening device after ring param change\n");
1571 "ring param change\n");
1572 return -ENOMEM; 1571 return -ENOMEM;
1573 } 1572 }
1574 } 1573 }
@@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1874 } 1873 }
1875 1874
1876 if (rxq->rx_desc_area == NULL) { 1875 if (rxq->rx_desc_area == NULL) {
1877 dev_printk(KERN_ERR, &mp->dev->dev, 1876 netdev_err(mp->dev,
1878 "can't allocate rx ring (%d bytes)\n", size); 1877 "can't allocate rx ring (%d bytes)\n", size);
1879 goto out; 1878 goto out;
1880 } 1879 }
@@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1884 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1883 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
1885 GFP_KERNEL); 1884 GFP_KERNEL);
1886 if (rxq->rx_skb == NULL) { 1885 if (rxq->rx_skb == NULL) {
1887 dev_printk(KERN_ERR, &mp->dev->dev, 1886 netdev_err(mp->dev, "can't allocate rx skb ring\n");
1888 "can't allocate rx skb ring\n");
1889 goto out_free; 1887 goto out_free;
1890 } 1888 }
1891 1889
@@ -1944,8 +1942,7 @@ static void rxq_deinit(struct rx_queue *rxq)
1944 } 1942 }
1945 1943
1946 if (rxq->rx_desc_count) { 1944 if (rxq->rx_desc_count) {
1947 dev_printk(KERN_ERR, &mp->dev->dev, 1945 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
1948 "error freeing rx ring -- %d skbs stuck\n",
1949 rxq->rx_desc_count); 1946 rxq->rx_desc_count);
1950 } 1947 }
1951 1948
@@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1987 } 1984 }
1988 1985
1989 if (txq->tx_desc_area == NULL) { 1986 if (txq->tx_desc_area == NULL) {
1990 dev_printk(KERN_ERR, &mp->dev->dev, 1987 netdev_err(mp->dev,
1991 "can't allocate tx ring (%d bytes)\n", size); 1988 "can't allocate tx ring (%d bytes)\n", size);
1992 return -ENOMEM; 1989 return -ENOMEM;
1993 } 1990 }
@@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2093 if (netif_carrier_ok(dev)) { 2090 if (netif_carrier_ok(dev)) {
2094 int i; 2091 int i;
2095 2092
2096 printk(KERN_INFO "%s: link down\n", dev->name); 2093 netdev_info(dev, "link down\n");
2097 2094
2098 netif_carrier_off(dev); 2095 netif_carrier_off(dev);
2099 2096
@@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
2124 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2121 duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
2125 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2122 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
2126 2123
2127 printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " 2124 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
2128 "flow control %sabled\n", dev->name, 2125 speed, duplex ? "full" : "half", fc ? "en" : "dis");
2129 speed, duplex ? "full" : "half",
2130 fc ? "en" : "dis");
2131 2126
2132 if (!netif_carrier_ok(dev)) 2127 if (!netif_carrier_ok(dev))
2133 netif_carrier_on(dev); 2128 netif_carrier_on(dev);
@@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2337 err = request_irq(dev->irq, mv643xx_eth_irq, 2332 err = request_irq(dev->irq, mv643xx_eth_irq,
2338 IRQF_SHARED, dev->name, dev); 2333 IRQF_SHARED, dev->name, dev);
2339 if (err) { 2334 if (err) {
2340 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2335 netdev_err(dev, "can't assign irq\n");
2341 return -EAGAIN; 2336 return -EAGAIN;
2342 } 2337 }
2343 2338
@@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2483 */ 2478 */
2484 mv643xx_eth_stop(dev); 2479 mv643xx_eth_stop(dev);
2485 if (mv643xx_eth_open(dev)) { 2480 if (mv643xx_eth_open(dev)) {
2486 dev_printk(KERN_ERR, &dev->dev, 2481 netdev_err(dev,
2487 "fatal error on re-opening device after " 2482 "fatal error on re-opening device after MTU change\n");
2488 "MTU change\n");
2489 } 2483 }
2490 2484
2491 return 0; 2485 return 0;
@@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
2508{ 2502{
2509 struct mv643xx_eth_private *mp = netdev_priv(dev); 2503 struct mv643xx_eth_private *mp = netdev_priv(dev);
2510 2504
2511 dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); 2505 netdev_info(dev, "tx timeout\n");
2512 2506
2513 schedule_work(&mp->tx_timeout_task); 2507 schedule_work(&mp->tx_timeout_task);
2514} 2508}
@@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2603 int ret; 2597 int ret;
2604 2598
2605 if (!mv643xx_eth_version_printed++) 2599 if (!mv643xx_eth_version_printed++)
2606 printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " 2600 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
2607 "driver version %s\n", mv643xx_eth_driver_version); 2601 mv643xx_eth_driver_version);
2608 2602
2609 ret = -EINVAL; 2603 ret = -EINVAL;
2610 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2604 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2871 2865
2872 pd = pdev->dev.platform_data; 2866 pd = pdev->dev.platform_data;
2873 if (pd == NULL) { 2867 if (pd == NULL) {
2874 dev_printk(KERN_ERR, &pdev->dev, 2868 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
2875 "no mv643xx_eth_platform_data\n");
2876 return -ENODEV; 2869 return -ENODEV;
2877 } 2870 }
2878 2871
2879 if (pd->shared == NULL) { 2872 if (pd->shared == NULL) {
2880 dev_printk(KERN_ERR, &pdev->dev, 2873 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
2881 "no mv643xx_eth_platform_data->shared\n");
2882 return -ENODEV; 2874 return -ENODEV;
2883 } 2875 }
2884 2876
@@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2957 if (err) 2949 if (err)
2958 goto out; 2950 goto out;
2959 2951
2960 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", 2952 netdev_notice(dev, "port %d with MAC address %pM\n",
2961 mp->port_num, dev->dev_addr); 2953 mp->port_num, dev->dev_addr);
2962 2954
2963 if (mp->tx_desc_sram_size > 0) 2955 if (mp->tx_desc_sram_size > 0)
2964 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2956 netdev_notice(dev, "configured with sram\n");
2965 2957
2966 return 0; 2958 return 0;
2967 2959
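mv643xx_eth now defines pr_fmt() before its includes, so the pr_warn()/pr_notice() calls above pick up the module-name prefix that the old printk strings spelled out by hand. A minimal sketch of the idiom, with a made-up helper:

/* Illustrative module; pr_fmt() must be defined before the includes. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>

static int example_wait_ready(int ready)
{
	if (!ready) {
		/* prints "<modname>: SMI bus busy timeout" */
		pr_warn("SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	return 0;
}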
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2c3a0..a7f2eed9a08 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
253 unsigned long serial_number; 253 unsigned long serial_number;
254 int vendor_specific_offset; 254 int vendor_specific_offset;
255 int fw_multicast_support; 255 int fw_multicast_support;
256 unsigned long features; 256 u32 features;
257 u32 max_tso6; 257 u32 max_tso6;
258 u32 read_dma; 258 u32 read_dma;
259 u32 write_dma; 259 u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
1776static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) 1776static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
1777{ 1777{
1778 struct myri10ge_priv *mgp = netdev_priv(netdev); 1778 struct myri10ge_priv *mgp = netdev_priv(netdev);
1779 unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); 1779 u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
1780 1780
1781 if (tso_enabled) 1781 if (tso_enabled)
1782 netdev->features |= flags; 1782 netdev->features |= flags;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35fda5ac812..392a6c4b72e 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -77,7 +77,6 @@ config NATIONAL_PHY
77 Currently supports the DP83865 PHY. 77 Currently supports the DP83865 PHY.
78 78
79config STE10XP 79config STE10XP
80 depends on PHYLIB
81 tristate "Driver for STMicroelectronics STe10Xp PHYs" 80 tristate "Driver for STMicroelectronics STe10Xp PHYs"
82 ---help--- 81 ---help---
83 This is the driver for the STe100p and STe101p PHYs. 82 This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678bc5a..590f902deb6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/phy.h> 21#include <linux/phy.h>
22 22#include <linux/micrel_phy.h>
23#define PHY_ID_KSZ9021 0x00221611
24#define PHY_ID_KS8737 0x00221720
25#define PHY_ID_KS8041 0x00221510
26#define PHY_ID_KS8051 0x00221550
27/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
28#define PHY_ID_KS8001 0x0022161A
29 23
30/* general Interrupt control/status reg in vendor specific block. */ 24/* general Interrupt control/status reg in vendor specific block. */
31#define MII_KSZPHY_INTCS 0x1B 25#define MII_KSZPHY_INTCS 0x1B
@@ -46,6 +40,7 @@
46#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9) 40#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
47#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14) 41#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
48#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) 42#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
43#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
49 44
50static int kszphy_ack_interrupt(struct phy_device *phydev) 45static int kszphy_ack_interrupt(struct phy_device *phydev)
51{ 46{
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
106 return 0; 101 return 0;
107} 102}
108 103
104static int ks8051_config_init(struct phy_device *phydev)
105{
106 int regval;
107
108 if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
109 regval = phy_read(phydev, MII_KSZPHY_CTRL);
110 regval |= KSZ8051_RMII_50MHZ_CLK;
111 phy_write(phydev, MII_KSZPHY_CTRL, regval);
112 }
113
114 return 0;
115}
116
109static struct phy_driver ks8737_driver = { 117static struct phy_driver ks8737_driver = {
110 .phy_id = PHY_ID_KS8737, 118 .phy_id = PHY_ID_KS8737,
111 .phy_id_mask = 0x00fffff0, 119 .phy_id_mask = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
142 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause 150 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
143 | SUPPORTED_Asym_Pause), 151 | SUPPORTED_Asym_Pause),
144 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 152 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
145 .config_init = kszphy_config_init, 153 .config_init = ks8051_config_init,
146 .config_aneg = genphy_config_aneg, 154 .config_aneg = genphy_config_aneg,
147 .read_status = genphy_read_status, 155 .read_status = genphy_read_status,
148 .ack_interrupt = kszphy_ack_interrupt, 156 .ack_interrupt = kszphy_ack_interrupt,
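The new ks8051_config_init() is a read-modify-write of a vendor register, gated on phydev->dev_flags. A generic sketch of that pattern follows; MY_PHY_CTRL and MY_PHY_BIT are placeholder names, and unlike the hunk above the sketch also checks the phy_read() return value, which can be a negative errno:

#include <linux/phy.h>

#define MY_PHY_CTRL	0x1f
#define MY_PHY_BIT	(1 << 7)

static int my_phy_set_ctrl_bit(struct phy_device *phydev)
{
	int val = phy_read(phydev, MY_PHY_CTRL);

	if (val < 0)
		return val;	/* phy_read() returns a negative errno on error */

	return phy_write(phydev, MY_PHY_CTRL, val | MY_PHY_BIT);
}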
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c446697..9f6d670748d 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
592 ppp_release(NULL, file); 592 ppp_release(NULL, file);
593 err = 0; 593 err = 0;
594 } else 594 } else
595 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", 595 pr_warn("PPPIOCDETACH file->f_count=%ld\n",
596 atomic_long_read(&file->f_count)); 596 atomic_long_read(&file->f_count));
597 mutex_unlock(&ppp_mutex); 597 mutex_unlock(&ppp_mutex);
598 return err; 598 return err;
599 } 599 }
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
630 630
631 if (pf->kind != INTERFACE) { 631 if (pf->kind != INTERFACE) {
632 /* can't happen */ 632 /* can't happen */
633 printk(KERN_ERR "PPP: not interface or channel??\n"); 633 pr_err("PPP: not interface or channel??\n");
634 return -EINVAL; 634 return -EINVAL;
635 } 635 }
636 636
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
704 } 704 }
705 vj = slhc_init(val2+1, val+1); 705 vj = slhc_init(val2+1, val+1);
706 if (!vj) { 706 if (!vj) {
707 printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); 707 netdev_err(ppp->dev,
708 "PPP: no memory (VJ compressor)\n");
708 err = -ENOMEM; 709 err = -ENOMEM;
709 break; 710 break;
710 } 711 }
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
898{ 899{
899 int err; 900 int err;
900 901
901 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); 902 pr_info("PPP generic driver version " PPP_VERSION "\n");
902 903
903 err = register_pernet_device(&ppp_net_ops); 904 err = register_pernet_device(&ppp_net_ops);
904 if (err) { 905 if (err) {
905 printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err); 906 pr_err("failed to register PPP pernet device (%d)\n", err);
906 goto out; 907 goto out;
907 } 908 }
908 909
909 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); 910 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
910 if (err) { 911 if (err) {
911 printk(KERN_ERR "failed to register PPP device (%d)\n", err); 912 pr_err("failed to register PPP device (%d)\n", err);
912 goto out_net; 913 goto out_net;
913 } 914 }
914 915
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1078 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); 1079 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1079 if (!new_skb) { 1080 if (!new_skb) {
1080 if (net_ratelimit()) 1081 if (net_ratelimit())
1081 printk(KERN_ERR "PPP: no memory (comp pkt)\n"); 1082 netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
1082 return NULL; 1083 return NULL;
1083 } 1084 }
1084 if (ppp->dev->hard_header_len > PPP_HDRLEN) 1085 if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1108 * the same number. 1109 * the same number.
1109 */ 1110 */
1110 if (net_ratelimit()) 1111 if (net_ratelimit())
1111 printk(KERN_ERR "ppp: compressor dropped pkt\n"); 1112 netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
1112 kfree_skb(skb); 1113 kfree_skb(skb);
1113 kfree_skb(new_skb); 1114 kfree_skb(new_skb);
1114 new_skb = NULL; 1115 new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1138 if (ppp->pass_filter && 1139 if (ppp->pass_filter &&
1139 sk_run_filter(skb, ppp->pass_filter) == 0) { 1140 sk_run_filter(skb, ppp->pass_filter) == 0) {
1140 if (ppp->debug & 1) 1141 if (ppp->debug & 1)
1141 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 1142 netdev_printk(KERN_DEBUG, ppp->dev,
1143 "PPP: outbound frame "
1144 "not passed\n");
1142 kfree_skb(skb); 1145 kfree_skb(skb);
1143 return; 1146 return;
1144 } 1147 }
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1164 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, 1167 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1165 GFP_ATOMIC); 1168 GFP_ATOMIC);
1166 if (!new_skb) { 1169 if (!new_skb) {
1167 printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n"); 1170 netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
1168 goto drop; 1171 goto drop;
1169 } 1172 }
1170 skb_reserve(new_skb, ppp->dev->hard_header_len - 2); 1173 skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1202 proto != PPP_LCP && proto != PPP_CCP) { 1205 proto != PPP_LCP && proto != PPP_CCP) {
1203 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { 1206 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1204 if (net_ratelimit()) 1207 if (net_ratelimit())
1205 printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n"); 1208 netdev_err(ppp->dev,
1209 "ppp: compression required but "
1210 "down - pkt dropped.\n");
1206 goto drop; 1211 goto drop;
1207 } 1212 }
1208 skb = pad_compress_skb(ppp, skb); 1213 skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1505 noskb: 1510 noskb:
1506 spin_unlock_bh(&pch->downl); 1511 spin_unlock_bh(&pch->downl);
1507 if (ppp->debug & 1) 1512 if (ppp->debug & 1)
1508 printk(KERN_ERR "PPP: no memory (fragment)\n"); 1513 netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
1509 ++ppp->dev->stats.tx_errors; 1514 ++ppp->dev->stats.tx_errors;
1510 ++ppp->nxseq; 1515 ++ppp->nxseq;
1511 return 1; /* abandon the frame */ 1516 return 1; /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1686 /* copy to a new sk_buff with more tailroom */ 1691 /* copy to a new sk_buff with more tailroom */
1687 ns = dev_alloc_skb(skb->len + 128); 1692 ns = dev_alloc_skb(skb->len + 128);
1688 if (!ns) { 1693 if (!ns) {
1689 printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); 1694 netdev_err(ppp->dev, "PPP: no memory "
1695 "(VJ decomp)\n");
1690 goto err; 1696 goto err;
1691 } 1697 }
1692 skb_reserve(ns, 2); 1698 skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1699 1705
1700 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); 1706 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1701 if (len <= 0) { 1707 if (len <= 0) {
1702 printk(KERN_DEBUG "PPP: VJ decompression error\n"); 1708 netdev_printk(KERN_DEBUG, ppp->dev,
1709 "PPP: VJ decompression error\n");
1703 goto err; 1710 goto err;
1704 } 1711 }
1705 len += 2; 1712 len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1721 goto err; 1728 goto err;
1722 1729
1723 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { 1730 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
1724 printk(KERN_ERR "PPP: VJ uncompressed error\n"); 1731 netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
1725 goto err; 1732 goto err;
1726 } 1733 }
1727 proto = PPP_IP; 1734 proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1762 if (ppp->pass_filter && 1769 if (ppp->pass_filter &&
1763 sk_run_filter(skb, ppp->pass_filter) == 0) { 1770 sk_run_filter(skb, ppp->pass_filter) == 0) {
1764 if (ppp->debug & 1) 1771 if (ppp->debug & 1)
1765 printk(KERN_DEBUG "PPP: inbound frame " 1772 netdev_printk(KERN_DEBUG, ppp->dev,
1766 "not passed\n"); 1773 "PPP: inbound frame "
1774 "not passed\n");
1767 kfree_skb(skb); 1775 kfree_skb(skb);
1768 return; 1776 return;
1769 } 1777 }
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1821 1829
1822 ns = dev_alloc_skb(obuff_size); 1830 ns = dev_alloc_skb(obuff_size);
1823 if (!ns) { 1831 if (!ns) {
1824 printk(KERN_ERR "ppp_decompress_frame: no memory\n"); 1832 netdev_err(ppp->dev, "ppp_decompress_frame: "
1833 "no memory\n");
1825 goto err; 1834 goto err;
1826 } 1835 }
1827 /* the decompressor still expects the A/C bytes in the hdr */ 1836 /* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
1989 u32 seq = ppp->nextseq; 1998 u32 seq = ppp->nextseq;
1990 u32 minseq = ppp->minseq; 1999 u32 minseq = ppp->minseq;
1991 struct sk_buff_head *list = &ppp->mrq; 2000 struct sk_buff_head *list = &ppp->mrq;
1992 struct sk_buff *p, *next; 2001 struct sk_buff *p, *tmp;
1993 struct sk_buff *head, *tail; 2002 struct sk_buff *head, *tail;
1994 struct sk_buff *skb = NULL; 2003 struct sk_buff *skb = NULL;
1995 int lost = 0, len = 0; 2004 int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
1998 return NULL; 2007 return NULL;
1999 head = list->next; 2008 head = list->next;
2000 tail = NULL; 2009 tail = NULL;
2001 for (p = head; p != (struct sk_buff *) list; p = next) { 2010 skb_queue_walk_safe(list, p, tmp) {
2002 next = p->next; 2011 again:
2003 if (seq_before(PPP_MP_CB(p)->sequence, seq)) { 2012 if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
2004 /* this can't happen, anyway ignore the skb */ 2013 /* this can't happen, anyway ignore the skb */
2005 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", 2014 netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
2006 PPP_MP_CB(p)->sequence, seq); 2015 "seq %u < %u\n",
2007 head = next; 2016 PPP_MP_CB(p)->sequence, seq);
2017 __skb_unlink(p, list);
2018 kfree_skb(p);
2008 continue; 2019 continue;
2009 } 2020 }
2010 if (PPP_MP_CB(p)->sequence != seq) { 2021 if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
2016 lost = 1; 2027 lost = 1;
2017 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? 2028 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
2018 minseq + 1: PPP_MP_CB(p)->sequence; 2029 minseq + 1: PPP_MP_CB(p)->sequence;
2019 next = p; 2030 goto again;
2020 continue;
2021 } 2031 }
2022 2032
2023 /* 2033 /*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
2042 (PPP_MP_CB(head)->BEbits & B)) { 2052 (PPP_MP_CB(head)->BEbits & B)) {
2043 if (len > ppp->mrru + 2) { 2053 if (len > ppp->mrru + 2) {
2044 ++ppp->dev->stats.rx_length_errors; 2054 ++ppp->dev->stats.rx_length_errors;
2045 printk(KERN_DEBUG "PPP: reconstructed packet" 2055 netdev_printk(KERN_DEBUG, ppp->dev,
2046 " is too long (%d)\n", len); 2056 "PPP: reconstructed packet"
2047 } else if (p == head) { 2057 " is too long (%d)\n", len);
2048 /* fragment is complete packet - reuse skb */
2049 tail = p;
2050 skb = skb_get(p);
2051 break;
2052 } else if ((skb = dev_alloc_skb(len)) == NULL) {
2053 ++ppp->dev->stats.rx_missed_errors;
2054 printk(KERN_DEBUG "PPP: no memory for "
2055 "reconstructed packet");
2056 } else { 2058 } else {
2057 tail = p; 2059 tail = p;
2058 break; 2060 break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
2065 * and we haven't found a complete valid packet yet, 2067 * and we haven't found a complete valid packet yet,
2066 * we can discard up to and including this fragment. 2068 * we can discard up to and including this fragment.
2067 */ 2069 */
2068 if (PPP_MP_CB(p)->BEbits & E) 2070 if (PPP_MP_CB(p)->BEbits & E) {
2069 head = next; 2071 struct sk_buff *tmp2;
2070 2072
2073 skb_queue_reverse_walk_from_safe(list, p, tmp2) {
2074 __skb_unlink(p, list);
2075 kfree_skb(p);
2076 }
2077 head = skb_peek(list);
2078 if (!head)
2079 break;
2080 }
2071 ++seq; 2081 ++seq;
2072 } 2082 }
2073 2083
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
2077 signal a receive error. */ 2087 signal a receive error. */
2078 if (PPP_MP_CB(head)->sequence != ppp->nextseq) { 2088 if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2079 if (ppp->debug & 1) 2089 if (ppp->debug & 1)
2080 printk(KERN_DEBUG " missed pkts %u..%u\n", 2090 netdev_printk(KERN_DEBUG, ppp->dev,
2081 ppp->nextseq, 2091 " missed pkts %u..%u\n",
2082 PPP_MP_CB(head)->sequence-1); 2092 ppp->nextseq,
2093 PPP_MP_CB(head)->sequence-1);
2083 ++ppp->dev->stats.rx_dropped; 2094 ++ppp->dev->stats.rx_dropped;
2084 ppp_receive_error(ppp); 2095 ppp_receive_error(ppp);
2085 } 2096 }
2086 2097
2087 if (head != tail) 2098 skb = head;
2088 /* copy to a single skb */ 2099 if (head != tail) {
2089 for (p = head; p != tail->next; p = p->next) 2100 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
2090 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); 2101 p = skb_queue_next(list, head);
2091 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; 2102 __skb_unlink(skb, list);
2092 head = tail->next; 2103 skb_queue_walk_from_safe(list, p, tmp) {
2093 } 2104 __skb_unlink(p, list);
2105 *fragpp = p;
2106 p->next = NULL;
2107 fragpp = &p->next;
2108
2109 skb->len += p->len;
2110 skb->data_len += p->len;
2111 skb->truesize += p->len;
2112
2113 if (p == tail)
2114 break;
2115 }
2116 } else {
2117 __skb_unlink(skb, list);
2118 }
2094 2119
2095 /* Discard all the skbuffs that we have copied the data out of 2120 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2096 or that we can't use. */
2097 while ((p = list->next) != head) {
2098 __skb_unlink(p, list);
2099 kfree_skb(p);
2100 } 2121 }
2101 2122
2102 return skb; 2123 return skb;
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2617 ret = register_netdev(dev); 2638 ret = register_netdev(dev);
2618 if (ret != 0) { 2639 if (ret != 0) {
2619 unit_put(&pn->units_idr, unit); 2640 unit_put(&pn->units_idr, unit);
2620 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2641 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
2621 dev->name, ret); 2642 dev->name, ret);
2622 goto out2; 2643 goto out2;
2623 } 2644 }
2624 2645
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
2690 2711
2691 if (!ppp->file.dead || ppp->n_channels) { 2712 if (!ppp->file.dead || ppp->n_channels) {
2692 /* "can't happen" */ 2713 /* "can't happen" */
2693 printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " 2714 netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
2694 "n_channels=%d !\n", ppp, ppp->file.dead, 2715 "but dead=%d n_channels=%d !\n",
2695 ppp->n_channels); 2716 ppp, ppp->file.dead, ppp->n_channels);
2696 return; 2717 return;
2697 } 2718 }
2698 2719
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
2834 2855
2835 if (!pch->file.dead) { 2856 if (!pch->file.dead) {
2836 /* "can't happen" */ 2857 /* "can't happen" */
2837 printk(KERN_ERR "ppp: destroying undead channel %p !\n", 2858 pr_err("ppp: destroying undead channel %p !\n", pch);
2838 pch);
2839 return; 2859 return;
2840 } 2860 }
2841 skb_queue_purge(&pch->file.xq); 2861 skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
2847{ 2867{
2848 /* should never happen */ 2868 /* should never happen */
2849 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2869 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2850 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2870 pr_err("PPP: removing module but units remain!\n");
2851 unregister_chrdev(PPP_MAJOR, "ppp"); 2871 unregister_chrdev(PPP_MAJOR, "ppp");
2852 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2872 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2853 class_destroy(ppp_class); 2873 class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
2865 2885
2866again: 2886again:
2867 if (!idr_pre_get(p, GFP_KERNEL)) { 2887 if (!idr_pre_get(p, GFP_KERNEL)) {
2868 printk(KERN_ERR "PPP: No free memory for idr\n"); 2888 pr_err("PPP: No free memory for idr\n");
2869 return -ENOMEM; 2889 return -ENOMEM;
2870 } 2890 }
2871 2891
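The ppp_mp_reconstruct() rework above swaps the hand-rolled head/next walk for skb_queue_walk_safe(), which tolerates unlinking the current skb, and splices fragments onto a frag_list instead of copying. A standalone sketch of the safe-walk-and-unlink part; purge_stale() and its sequence callback are invented for illustration:

#include <linux/skbuff.h>
#include <linux/types.h>

static void purge_stale(struct sk_buff_head *list, u32 min_seq,
			u32 (*seq_of)(const struct sk_buff *skb))
{
	struct sk_buff *skb, *tmp;

	/* the _safe variant keeps a lookahead so the current skb may go away */
	skb_queue_walk_safe(list, skb, tmp) {
		if (seq_of(skb) >= min_seq)
			continue;
		__skb_unlink(skb, list);	/* caller holds the queue lock */
		kfree_skb(skb);
	}
}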
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 164cfad6ce7..1af549c89d5 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
175 struct pptp_opt *opt = &po->proto.pptp; 175 struct pptp_opt *opt = &po->proto.pptp;
176 struct pptp_gre_header *hdr; 176 struct pptp_gre_header *hdr;
177 unsigned int header_len = sizeof(*hdr); 177 unsigned int header_len = sizeof(*hdr);
178 int err = 0;
179 int islcp; 178 int islcp;
180 int len; 179 int len;
181 unsigned char *data; 180 unsigned char *data;
@@ -198,8 +197,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
198 .saddr = opt->src_addr.sin_addr.s_addr, 197 .saddr = opt->src_addr.sin_addr.s_addr,
199 .tos = RT_TOS(0) } }, 198 .tos = RT_TOS(0) } },
200 .proto = IPPROTO_GRE }; 199 .proto = IPPROTO_GRE };
201 err = ip_route_output_key(&init_net, &rt, &fl); 200 rt = ip_route_output_key(&init_net, &fl);
202 if (err) 201 if (IS_ERR(rt))
203 goto tx_error; 202 goto tx_error;
204 } 203 }
205 tdev = rt->dst.dev; 204 tdev = rt->dst.dev;
@@ -477,7 +476,8 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
477 .tos = RT_CONN_FLAGS(sk) } }, 476 .tos = RT_CONN_FLAGS(sk) } },
478 .proto = IPPROTO_GRE }; 477 .proto = IPPROTO_GRE };
479 security_sk_classify_flow(sk, &fl); 478 security_sk_classify_flow(sk, &fl);
480 if (ip_route_output_key(&init_net, &rt, &fl)) { 479 rt = ip_route_output_key(&init_net, &fl);
480 if (IS_ERR(rt)) {
481 error = -EHOSTUNREACH; 481 error = -EHOSTUNREACH;
482 goto end; 482 goto end;
483 } 483 }
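Both pptp hunks move to the newer ip_route_output_key() convention, where the route itself is returned and errors come back as ERR_PTR values instead of through an output parameter. A minimal sketch, reusing the flowi initializer style shown in the patch; lookup_gre_route() and its arguments are illustrative:

#include <linux/err.h>
#include <linux/in.h>
#include <net/net_namespace.h>
#include <net/route.h>

static struct rtable *lookup_gre_route(__be32 daddr, __be32 saddr)
{
	struct flowi fl = { .nl_u = { .ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
				.tos   = RT_TOS(0) } },
			    .proto = IPPROTO_GRE };
	struct rtable *rt;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt))
		return NULL;	/* or propagate PTR_ERR(rt) to the caller */

	return rt;
}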
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 1a3584edd79..2d21c60085b 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
379{ 379{
380 struct ql3xxx_port_registers __iomem *port_regs = 380 struct ql3xxx_port_registers __iomem *port_regs =
381 qdev->mem_map_registers; 381 qdev->mem_map_registers;
382 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 382 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
383 383
384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 384 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 385 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
398 u32 previousBit; 398 u32 previousBit;
399 struct ql3xxx_port_registers __iomem *port_regs = 399 struct ql3xxx_port_registers __iomem *port_regs =
400 qdev->mem_map_registers; 400 qdev->mem_map_registers;
401 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 401 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
402 402
403 /* Clock in a zero, then do the start bit */ 403 /* Clock in a zero, then do the start bit */
404 ql_write_nvram_reg(qdev, spir, 404 ql_write_nvram_reg(qdev, spir,
@@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
467{ 467{
468 struct ql3xxx_port_registers __iomem *port_regs = 468 struct ql3xxx_port_registers __iomem *port_regs =
469 qdev->mem_map_registers; 469 qdev->mem_map_registers;
470 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 470 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
471 471
472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 472 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 473 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
483 u32 dataBit; 483 u32 dataBit;
484 struct ql3xxx_port_registers __iomem *port_regs = 484 struct ql3xxx_port_registers __iomem *port_regs =
485 qdev->mem_map_registers; 485 qdev->mem_map_registers;
486 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 486 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
487 487
488 /* Read the data bits */ 488 /* Read the data bits */
489 /* The first bit is a dummy. Clock right over it. */ 489 /* The first bit is a dummy. Clock right over it. */
@@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
3011 u32 value; 3011 u32 value;
3012 struct ql3xxx_port_registers __iomem *port_regs = 3012 struct ql3xxx_port_registers __iomem *port_regs =
3013 qdev->mem_map_registers; 3013 qdev->mem_map_registers;
3014 u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3014 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3016 (void __iomem *)port_regs; 3016 (void __iomem *)port_regs;
3017 u32 delay = 10; 3017 u32 delay = 10;
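The qla3xxx changes only add the __iomem annotation that sparse expects on pointers into mapped register space. A small sketch of the annotation in use; the register offset and helper name are hypothetical:

#include <linux/io.h>
#include <linux/types.h>

#define MY_CTRL_REG	0x10

static u32 toggle_ctrl_bit(void __iomem *base, u32 bit)
{
	/* the __iomem annotation keeps sparse happy about the address space */
	u32 __iomem *reg = base + MY_CTRL_REG;
	u32 val = readl(reg);

	writel(val ^ bit, reg);
	return val;
}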
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 44e316fd67b..dc44564ef6f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -867,7 +867,6 @@ struct qlcnic_nic_intr_coalesce {
867#define LINKEVENT_LINKSPEED_MBPS 0 867#define LINKEVENT_LINKSPEED_MBPS 0
868#define LINKEVENT_LINKSPEED_ENCODED 1 868#define LINKEVENT_LINKSPEED_ENCODED 1
869 869
870#define AUTO_FW_RESET_ENABLED 0x01
871/* firmware response header: 870/* firmware response header:
872 * 63:58 - message type 871 * 63:58 - message type
873 * 57:56 - owner 872 * 57:56 - owner
@@ -1133,14 +1132,10 @@ struct qlcnic_eswitch {
1133#define MAX_BW 100 /* % of link speed */ 1132#define MAX_BW 100 /* % of link speed */
1134#define MAX_VLAN_ID 4095 1133#define MAX_VLAN_ID 4095
1135#define MIN_VLAN_ID 2 1134#define MIN_VLAN_ID 2
1136#define MAX_TX_QUEUES 1
1137#define MAX_RX_QUEUES 4
1138#define DEFAULT_MAC_LEARN 1 1135#define DEFAULT_MAC_LEARN 1
1139 1136
1140#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) 1137#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1141#define IS_VALID_BW(bw) (bw <= MAX_BW) 1138#define IS_VALID_BW(bw) (bw <= MAX_BW)
1142#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1143#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1144 1139
1145struct qlcnic_pci_func_cfg { 1140struct qlcnic_pci_func_cfg {
1146 u16 func_type; 1141 u16 func_type;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 37c04b4fade..cd88c7e1bfa 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -42,7 +42,7 @@ static int use_msi_x = 1;
42module_param(use_msi_x, int, 0444); 42module_param(use_msi_x, int, 0444);
43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
44 44
45static int auto_fw_reset = AUTO_FW_RESET_ENABLED; 45static int auto_fw_reset = 1;
46module_param(auto_fw_reset, int, 0644); 46module_param(auto_fw_reset, int, 0644);
47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
48 48
@@ -2959,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2959 if (adapter->need_fw_reset) 2959 if (adapter->need_fw_reset)
2960 goto detach; 2960 goto detach;
2961 2961
2962 if (adapter->reset_context && 2962 if (adapter->reset_context && auto_fw_reset) {
2963 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2964 qlcnic_reset_hw_context(adapter); 2963 qlcnic_reset_hw_context(adapter);
2965 adapter->netdev->trans_start = jiffies; 2964 adapter->netdev->trans_start = jiffies;
2966 } 2965 }
@@ -2973,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2973 2972
2974 qlcnic_dev_request_reset(adapter); 2973 qlcnic_dev_request_reset(adapter);
2975 2974
2976 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED)) 2975 if (auto_fw_reset)
2977 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); 2976 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2978 2977
2979 dev_info(&netdev->dev, "firmware hang detected\n"); 2978 dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2982,7 +2981,7 @@ detach:
2982 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : 2981 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2983 QLCNIC_DEV_NEED_RESET; 2982 QLCNIC_DEV_NEED_RESET;
2984 2983
2985 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && 2984 if (auto_fw_reset &&
2986 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { 2985 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2987 2986
2988 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); 2987 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
@@ -3654,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter,
3654 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) 3653 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3655 return QL_STATUS_INVALID_PARAM; 3654 return QL_STATUS_INVALID_PARAM;
3656 3655
3657 if (!IS_VALID_BW(np_cfg[i].min_bw) 3656 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3658 || !IS_VALID_BW(np_cfg[i].max_bw) 3657 !IS_VALID_BW(np_cfg[i].max_bw))
3659 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3660 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3661 return QL_STATUS_INVALID_PARAM; 3658 return QL_STATUS_INVALID_PARAM;
3662 } 3659 }
3663 return 0; 3660 return 0;
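With AUTO_FW_RESET_ENABLED gone, qlcnic simply treats the auto_fw_reset module parameter as a boolean. A sketch of that pattern with illustrative names:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int auto_reset = 1;
module_param(auto_reset, int, 0644);
MODULE_PARM_DESC(auto_reset, "Automatic firmware reset (0=disabled, 1=enabled)");

static void maybe_reset(void (*do_reset)(void))
{
	if (auto_reset)		/* plain truth test, no named constant needed */
		do_reset();
}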
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 27e6f6d43ca..e3ebd90ae65 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
49#include <asm/processor.h> 49#include <asm/processor.h>
50 50
51#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
52#define DRV_VERSION "0.26" 52#define DRV_VERSION "0.27"
53#define DRV_RELDATE "30May2010" 53#define DRV_RELDATE "23Feb2011"
54 54
55/* PHY CHIP Address */ 55/* PHY CHIP Address */
56#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -69,6 +69,8 @@
69 69
70/* MAC registers */ 70/* MAC registers */
71#define MCR0 0x00 /* Control register 0 */ 71#define MCR0 0x00 /* Control register 0 */
72#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
73#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
72#define MCR1 0x04 /* Control register 1 */ 74#define MCR1 0x04 /* Control register 1 */
73#define MAC_RST 0x0001 /* Reset the MAC */ 75#define MAC_RST 0x0001 /* Reset the MAC */
74#define MBCR 0x08 /* Bus control */ 76#define MBCR 0x08 /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
851{ 853{
852 struct r6040_private *lp = netdev_priv(dev); 854 struct r6040_private *lp = netdev_priv(dev);
853 void __iomem *ioaddr = lp->base; 855 void __iomem *ioaddr = lp->base;
854 u16 *adrp;
855 u16 reg;
856 unsigned long flags; 856 unsigned long flags;
857 struct netdev_hw_addr *ha; 857 struct netdev_hw_addr *ha;
858 int i; 858 int i;
859 u16 *adrp;
860 u16 hash_table[4] = { 0 };
861
862 spin_lock_irqsave(&lp->lock, flags);
859 863
860 /* MAC Address */ 864 /* Keep our MAC Address */
861 adrp = (u16 *)dev->dev_addr; 865 adrp = (u16 *)dev->dev_addr;
862 iowrite16(adrp[0], ioaddr + MID_0L); 866 iowrite16(adrp[0], ioaddr + MID_0L);
863 iowrite16(adrp[1], ioaddr + MID_0M); 867 iowrite16(adrp[1], ioaddr + MID_0M);
864 iowrite16(adrp[2], ioaddr + MID_0H); 868 iowrite16(adrp[2], ioaddr + MID_0H);
865 869
866 /* Promiscous Mode */
867 spin_lock_irqsave(&lp->lock, flags);
868
869 /* Clear AMCP & PROM bits */ 870 /* Clear AMCP & PROM bits */
870 reg = ioread16(ioaddr) & ~0x0120; 871 lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
871 if (dev->flags & IFF_PROMISC) {
872 reg |= 0x0020;
873 lp->mcr0 |= 0x0020;
874 }
875 /* Too many multicast addresses
876 * accept all traffic */
877 else if ((netdev_mc_count(dev) > MCAST_MAX) ||
878 (dev->flags & IFF_ALLMULTI))
879 reg |= 0x0020;
880 872
881 iowrite16(reg, ioaddr); 873 /* Promiscuous mode */
882 spin_unlock_irqrestore(&lp->lock, flags); 874 if (dev->flags & IFF_PROMISC)
875 lp->mcr0 |= MCR0_PROMISC;
883 876
884 /* Build the hash table */ 877 /* Enable multicast hash table function to
885 if (netdev_mc_count(dev) > MCAST_MAX) { 878 * receive all multicast packets. */
886 u16 hash_table[4]; 879 else if (dev->flags & IFF_ALLMULTI) {
887 u32 crc; 880 lp->mcr0 |= MCR0_HASH_EN;
888 881
889 for (i = 0; i < 4; i++) 882 for (i = 0; i < MCAST_MAX ; i++) {
890 hash_table[i] = 0; 883 iowrite16(0, ioaddr + MID_1L + 8 * i);
884 iowrite16(0, ioaddr + MID_1M + 8 * i);
885 iowrite16(0, ioaddr + MID_1H + 8 * i);
886 }
891 887
888 for (i = 0; i < 4; i++)
889 hash_table[i] = 0xffff;
890 }
891 /* Use internal multicast address registers if the number of
892 * multicast addresses is not greater than MCAST_MAX. */
893 else if (netdev_mc_count(dev) <= MCAST_MAX) {
894 i = 0;
892 netdev_for_each_mc_addr(ha, dev) { 895 netdev_for_each_mc_addr(ha, dev) {
893 char *addrs = ha->addr; 896 u16 *adrp = (u16 *) ha->addr;
897 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
898 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
899 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
900 i++;
901 }
902 while (i < MCAST_MAX) {
903 iowrite16(0, ioaddr + MID_1L + 8 * i);
904 iowrite16(0, ioaddr + MID_1M + 8 * i);
905 iowrite16(0, ioaddr + MID_1H + 8 * i);
906 i++;
907 }
908 }
909 /* Otherwise, Enable multicast hash table function. */
910 else {
911 u32 crc;
894 912
895 if (!(*addrs & 1)) 913 lp->mcr0 |= MCR0_HASH_EN;
896 continue; 914
915 for (i = 0; i < MCAST_MAX ; i++) {
916 iowrite16(0, ioaddr + MID_1L + 8 * i);
917 iowrite16(0, ioaddr + MID_1M + 8 * i);
918 iowrite16(0, ioaddr + MID_1H + 8 * i);
919 }
897 920
898 crc = ether_crc_le(6, addrs); 921 /* Build multicast hash table */
922 netdev_for_each_mc_addr(ha, dev) {
923 u8 *addrs = ha->addr;
924
925 crc = ether_crc(ETH_ALEN, addrs);
899 crc >>= 26; 926 crc >>= 26;
900 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 927 hash_table[crc >> 4] |= 1 << (crc & 0xf);
901 } 928 }
902 /* Fill the MAC hash tables with their values */ 929 }
930
931 iowrite16(lp->mcr0, ioaddr + MCR0);
932
933 /* Fill the MAC hash tables with their values */
934 if (lp->mcr0 && MCR0_HASH_EN) {
903 iowrite16(hash_table[0], ioaddr + MAR0); 935 iowrite16(hash_table[0], ioaddr + MAR0);
904 iowrite16(hash_table[1], ioaddr + MAR1); 936 iowrite16(hash_table[1], ioaddr + MAR1);
905 iowrite16(hash_table[2], ioaddr + MAR2); 937 iowrite16(hash_table[2], ioaddr + MAR2);
906 iowrite16(hash_table[3], ioaddr + MAR3); 938 iowrite16(hash_table[3], ioaddr + MAR3);
907 } 939 }
908 /* Multicast Address 1~4 case */ 940
909 i = 0; 941 spin_unlock_irqrestore(&lp->lock, flags);
910 netdev_for_each_mc_addr(ha, dev) {
911 if (i >= MCAST_MAX)
912 break;
913 adrp = (u16 *) ha->addr;
914 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
915 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
916 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
917 i++;
918 }
919 while (i < MCAST_MAX) {
920 iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
921 iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
922 iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
923 i++;
924 }
925} 942}
926 943
927static void netdev_get_drvinfo(struct net_device *dev, 944static void netdev_get_drvinfo(struct net_device *dev,
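The rewritten r6040_multicast_list() takes the top six bits of the Ethernet CRC and maps them onto four 16-bit hash registers; the helper below mirrors that mapping, with my_hash_bit() being a made-up name. One caveat worth noting: the final "if (lp->mcr0 && MCR0_HASH_EN)" test in the hunk uses a logical AND where a bitwise AND appears intended, so the hash registers are written whenever mcr0 is non-zero rather than only when hashing is enabled.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/types.h>

static void my_hash_bit(u8 *mc_addr, u16 hash_table[4])
{
	u32 crc = ether_crc(ETH_ALEN, mc_addr);

	crc >>= 26;				  /* keep the top 6 CRC bits */
	hash_table[crc >> 4] |= 1 << (crc & 0xf); /* word 0..3, bit 0..15 */
}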
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac74384..d563049859a 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/cpu_rmap.h>
24#include "net_driver.h" 25#include "net_driver.h"
25#include "efx.h" 26#include "efx.h"
26#include "nic.h" 27#include "nic.h"
@@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
307 channel->irq_mod_score = 0; 308 channel->irq_mod_score = 0;
308 } 309 }
309 310
311 efx_filter_rfs_expire(channel);
312
310 /* There is no race here; although napi_disable() will 313 /* There is no race here; although napi_disable() will
311 * only wait for napi_complete(), this isn't a problem 314 * only wait for napi_complete(), this isn't a problem
312 * since efx_channel_processed() will have no effect if 315 * since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx)
673 676
674 efx_for_each_channel_rx_queue(rx_queue, channel) 677 efx_for_each_channel_rx_queue(rx_queue, channel)
675 efx_fini_rx_queue(rx_queue); 678 efx_fini_rx_queue(rx_queue);
676 efx_for_each_channel_tx_queue(tx_queue, channel) 679 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
677 efx_fini_tx_queue(tx_queue); 680 efx_fini_tx_queue(tx_queue);
678 efx_fini_eventq(channel); 681 efx_fini_eventq(channel);
679 } 682 }
@@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel)
689 692
690 efx_for_each_channel_rx_queue(rx_queue, channel) 693 efx_for_each_channel_rx_queue(rx_queue, channel)
691 efx_remove_rx_queue(rx_queue); 694 efx_remove_rx_queue(rx_queue);
692 efx_for_each_channel_tx_queue(tx_queue, channel) 695 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
693 efx_remove_tx_queue(tx_queue); 696 efx_remove_tx_queue(tx_queue);
694 efx_remove_eventq(channel); 697 efx_remove_eventq(channel);
695} 698}
@@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void)
1175 return count; 1178 return count;
1176} 1179}
1177 1180
1181static int
1182efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
1183{
1184#ifdef CONFIG_RFS_ACCEL
1185 int i, rc;
1186
1187 efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
1188 if (!efx->net_dev->rx_cpu_rmap)
1189 return -ENOMEM;
1190 for (i = 0; i < efx->n_rx_channels; i++) {
1191 rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
1192 xentries[i].vector);
1193 if (rc) {
1194 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
1195 efx->net_dev->rx_cpu_rmap = NULL;
1196 return rc;
1197 }
1198 }
1199#endif
1200 return 0;
1201}
1202
1178/* Probe the number and type of interrupts we are able to obtain, and 1203/* Probe the number and type of interrupts we are able to obtain, and
1179 * the resulting numbers of channels and RX queues. 1204 * the resulting numbers of channels and RX queues.
1180 */ 1205 */
1181static void efx_probe_interrupts(struct efx_nic *efx) 1206static int efx_probe_interrupts(struct efx_nic *efx)
1182{ 1207{
1183 int max_channels = 1208 int max_channels =
1184 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); 1209 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1220 efx->n_tx_channels = efx->n_channels; 1245 efx->n_tx_channels = efx->n_channels;
1221 efx->n_rx_channels = efx->n_channels; 1246 efx->n_rx_channels = efx->n_channels;
1222 } 1247 }
1248 rc = efx_init_rx_cpu_rmap(efx, xentries);
1249 if (rc) {
1250 pci_disable_msix(efx->pci_dev);
1251 return rc;
1252 }
1223 for (i = 0; i < n_channels; i++) 1253 for (i = 0; i < n_channels; i++)
1224 efx_get_channel(efx, i)->irq = 1254 efx_get_channel(efx, i)->irq =
1225 xentries[i].vector; 1255 xentries[i].vector;
@@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1253 efx->n_tx_channels = 1; 1283 efx->n_tx_channels = 1;
1254 efx->legacy_irq = efx->pci_dev->irq; 1284 efx->legacy_irq = efx->pci_dev->irq;
1255 } 1285 }
1286
1287 return 0;
1256} 1288}
1257 1289
1258static void efx_remove_interrupts(struct efx_nic *efx) 1290static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1271,21 +1303,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1271 1303
1272static void efx_set_channels(struct efx_nic *efx) 1304static void efx_set_channels(struct efx_nic *efx)
1273{ 1305{
1274 struct efx_channel *channel;
1275 struct efx_tx_queue *tx_queue;
1276
1277 efx->tx_channel_offset = 1306 efx->tx_channel_offset =
1278 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1307 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1279
1280 /* Channel pointers were set in efx_init_struct() but we now
1281 * need to clear them for TX queues in any RX-only channels. */
1282 efx_for_each_channel(channel, efx) {
1283 if (channel->channel - efx->tx_channel_offset >=
1284 efx->n_tx_channels) {
1285 efx_for_each_channel_tx_queue(tx_queue, channel)
1286 tx_queue->channel = NULL;
1287 }
1288 }
1289} 1308}
1290 1309
1291static int efx_probe_nic(struct efx_nic *efx) 1310static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx)
1302 1321
1303 /* Determine the number of channels and queues by trying to hook 1322 /* Determine the number of channels and queues by trying to hook
1304 * in MSI-X interrupts. */ 1323 * in MSI-X interrupts. */
1305 efx_probe_interrupts(efx); 1324 rc = efx_probe_interrupts(efx);
1325 if (rc)
1326 goto fail;
1306 1327
1307 if (efx->n_channels > 1) 1328 if (efx->n_channels > 1)
1308 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); 1329 get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx)
1317 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1338 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
1318 1339
1319 return 0; 1340 return 0;
1341
1342fail:
1343 efx->type->remove(efx);
1344 return rc;
1320} 1345}
1321 1346
1322static void efx_remove_nic(struct efx_nic *efx) 1347static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1531 efx->irq_rx_adaptive = rx_adaptive; 1556 efx->irq_rx_adaptive = rx_adaptive;
1532 efx->irq_rx_moderation = rx_ticks; 1557 efx->irq_rx_moderation = rx_ticks;
1533 efx_for_each_channel(channel, efx) { 1558 efx_for_each_channel(channel, efx) {
1534 if (efx_channel_get_rx_queue(channel)) 1559 if (efx_channel_has_rx_queue(channel))
1535 channel->irq_moderation = rx_ticks; 1560 channel->irq_moderation = rx_ticks;
1536 else if (efx_channel_get_tx_queue(channel, 0)) 1561 else if (efx_channel_has_tx_queues(channel))
1537 channel->irq_moderation = tx_ticks; 1562 channel->irq_moderation = tx_ticks;
1538 } 1563 }
1539} 1564}
@@ -1849,6 +1874,10 @@ static const struct net_device_ops efx_netdev_ops = {
1849#ifdef CONFIG_NET_POLL_CONTROLLER 1874#ifdef CONFIG_NET_POLL_CONTROLLER
1850 .ndo_poll_controller = efx_netpoll, 1875 .ndo_poll_controller = efx_netpoll,
1851#endif 1876#endif
1877 .ndo_setup_tc = efx_setup_tc,
1878#ifdef CONFIG_RFS_ACCEL
1879 .ndo_rx_flow_steer = efx_filter_rfs,
1880#endif
1852}; 1881};
1853 1882
1854static void efx_update_name(struct efx_nic *efx) 1883static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1939,8 @@ static int efx_register_netdev(struct efx_nic *efx)
1910 1939
1911 efx_for_each_channel(channel, efx) { 1940 efx_for_each_channel(channel, efx) {
1912 struct efx_tx_queue *tx_queue; 1941 struct efx_tx_queue *tx_queue;
1913 efx_for_each_channel_tx_queue(tx_queue, channel) { 1942 efx_for_each_channel_tx_queue(tx_queue, channel)
1914 tx_queue->core_txq = netdev_get_tx_queue( 1943 efx_init_tx_queue_core_txq(tx_queue);
1915 efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
1916 }
1917 } 1944 }
1918 1945
1919 /* Always start with carrier off; PHY events will detect the link */ 1946 /* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx)
2288 */ 2315 */
2289static void efx_pci_remove_main(struct efx_nic *efx) 2316static void efx_pci_remove_main(struct efx_nic *efx)
2290{ 2317{
2318#ifdef CONFIG_RFS_ACCEL
2319 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
2320 efx->net_dev->rx_cpu_rmap = NULL;
2321#endif
2291 efx_nic_fini_interrupt(efx); 2322 efx_nic_fini_interrupt(efx);
2292 efx_fini_channels(efx); 2323 efx_fini_channels(efx);
2293 efx_fini_port(efx); 2324 efx_fini_port(efx);
@@ -2401,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2401 int i, rc; 2432 int i, rc;
2402 2433
2403 /* Allocate and initialise a struct net_device and struct efx_nic */ 2434 /* Allocate and initialise a struct net_device and struct efx_nic */
2404 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); 2435 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
2436 EFX_MAX_RX_QUEUES);
2405 if (!net_dev) 2437 if (!net_dev)
2406 return -ENOMEM; 2438 return -ENOMEM;
2407 net_dev->features |= (type->offload_features | NETIF_F_SG | 2439 net_dev->features |= (type->offload_features | NETIF_F_SG |
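efx_init_rx_cpu_rmap() above is the standard accelerated-RFS setup: allocate an IRQ CPU reverse map sized for the RX channels and register each MSI-X vector with it so the stack can steer flows to the CPU that owns the interrupt. A self-contained sketch of the same sequence; setup_rx_rmap() and its parameters are placeholders:

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

static int setup_rx_rmap(struct net_device *dev, struct msix_entry *entries,
			 unsigned int n_rx)
{
	unsigned int i;
	int rc;

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(n_rx);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < n_rx; i++) {
		rc = irq_cpu_rmap_add(dev->rx_cpu_rmap, entries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(dev->rx_cpu_rmap);
			dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
	return 0;
}
#endif /* CONFIG_RFS_ACCEL */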
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5212b..3d83a1f74fe 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,7 @@
29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); 29extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); 30extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 31extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
32extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
32extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 33extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
33extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); 34extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
34extern netdev_tx_t 35extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
36extern netdev_tx_t 37extern netdev_tx_t
37efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 38efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
38extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 39extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
40extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
39 41
40/* RX */ 42/* RX */
41extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 43extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx,
74 struct efx_filter_spec *spec); 76 struct efx_filter_spec *spec);
75extern void efx_filter_clear_rx(struct efx_nic *efx, 77extern void efx_filter_clear_rx(struct efx_nic *efx,
76 enum efx_filter_priority priority); 78 enum efx_filter_priority priority);
79#ifdef CONFIG_RFS_ACCEL
80extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
81 u16 rxq_index, u32 flow_id);
82extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
83static inline void efx_filter_rfs_expire(struct efx_channel *channel)
84{
85 if (channel->rfs_filters_added >= 60 &&
86 __efx_filter_rfs_expire(channel->efx, 100))
87 channel->rfs_filters_added -= 60;
88}
89#define efx_filter_rfs_enabled() 1
90#else
91static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
92#define efx_filter_rfs_enabled() 0
93#endif
77 94
78/* Channels */ 95/* Channels */
79extern void efx_process_channel_now(struct efx_channel *channel); 96extern void efx_process_channel_now(struct efx_channel *channel);
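Illustrative sketch (not part of the patch): the efx_filter_rfs_expire() inline added to efx.h above is a small credit scheme — a channel only pays for an expiry scan once it has added at least 60 accelerated-RFS filters, each scan is capped at a quota of 100 table slots, and the credit is deducted only when __efx_filter_rfs_expire() actually ran. The standalone C program below models that behaviour; the toy_* names are invented stand-ins, only the constants 60 and 100 come from the hunk.

#include <stdbool.h>
#include <stdio.h>

#define RFS_ADD_CREDIT 60	/* filters added before a scan is worthwhile */
#define RFS_SCAN_QUOTA 100	/* table slots examined per scan */

struct toy_channel {
	unsigned rfs_filters_added;
};

/* Stand-in for __efx_filter_rfs_expire(): pretend we always got the lock
 * and completed a scan of up to 'quota' slots. */
static bool toy_rfs_expire(unsigned quota)
{
	printf("scanning up to %u filter slots for stale flows\n", quota);
	return true;
}

/* Mirrors the inline helper: only scan once enough filters have piled up,
 * and deduct the credit only when a scan actually ran. */
static void toy_rfs_maybe_expire(struct toy_channel *ch)
{
	if (ch->rfs_filters_added >= RFS_ADD_CREDIT &&
	    toy_rfs_expire(RFS_SCAN_QUOTA))
		ch->rfs_filters_added -= RFS_ADD_CREDIT;
}

int main(void)
{
	struct toy_channel ch = { .rfs_filters_added = 0 };

	for (int i = 0; i < 150; i++) {
		ch.rfs_filters_added++;	/* pretend a flow was just steered */
		toy_rfs_maybe_expire(&ch);
	}
	printf("credit left: %u\n", ch.rfs_filters_added);
	return 0;
}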
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index ca886d98bdc..807178ef65a 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -28,7 +28,8 @@ struct efx_ethtool_stat {
28 enum { 28 enum {
29 EFX_ETHTOOL_STAT_SOURCE_mac_stats, 29 EFX_ETHTOOL_STAT_SOURCE_mac_stats,
30 EFX_ETHTOOL_STAT_SOURCE_nic, 30 EFX_ETHTOOL_STAT_SOURCE_nic,
31 EFX_ETHTOOL_STAT_SOURCE_channel 31 EFX_ETHTOOL_STAT_SOURCE_channel,
32 EFX_ETHTOOL_STAT_SOURCE_tx_queue
32 } source; 33 } source;
33 unsigned offset; 34 unsigned offset;
34 u64(*get_stat) (void *field); /* Reader function */ 35 u64(*get_stat) (void *field); /* Reader function */
@@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field)
86 EFX_ETHTOOL_STAT(field, channel, n_##field, \ 87 EFX_ETHTOOL_STAT(field, channel, n_##field, \
87 unsigned int, efx_get_uint_stat) 88 unsigned int, efx_get_uint_stat)
88 89
90#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
91 EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
92 unsigned int, efx_get_uint_stat)
93
89static struct efx_ethtool_stat efx_ethtool_stats[] = { 94static struct efx_ethtool_stat efx_ethtool_stats[] = {
90 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), 95 EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
91 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), 96 EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
@@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
116 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), 121 EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
117 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), 122 EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
118 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), 123 EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
124 EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
125 EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
126 EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
127 EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
119 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), 128 EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
120 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), 129 EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
121 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), 130 EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
@@ -237,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
237 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); 246 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
238 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 247 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
239 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 248 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
240 siena_print_fwver(efx, info->fw_version, 249 efx_mcdi_print_fwver(efx, info->fw_version,
241 sizeof(info->fw_version)); 250 sizeof(info->fw_version));
242 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 251 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
243} 252}
244 253
@@ -470,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
470 struct efx_mac_stats *mac_stats = &efx->mac_stats; 479 struct efx_mac_stats *mac_stats = &efx->mac_stats;
471 struct efx_ethtool_stat *stat; 480 struct efx_ethtool_stat *stat;
472 struct efx_channel *channel; 481 struct efx_channel *channel;
482 struct efx_tx_queue *tx_queue;
473 struct rtnl_link_stats64 temp; 483 struct rtnl_link_stats64 temp;
474 int i; 484 int i;
475 485
@@ -495,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
495 data[i] += stat->get_stat((void *)channel + 505 data[i] += stat->get_stat((void *)channel +
496 stat->offset); 506 stat->offset);
497 break; 507 break;
508 case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
509 data[i] = 0;
510 efx_for_each_channel(channel, efx) {
511 efx_for_each_channel_tx_queue(tx_queue, channel)
512 data[i] +=
513 stat->get_stat((void *)tx_queue
514 + stat->offset);
515 }
516 break;
498 } 517 }
499 } 518 }
500} 519}
@@ -502,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
502static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) 521static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
503{ 522{
504 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); 523 struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
505 unsigned long features; 524 u32 features;
506 525
507 features = NETIF_F_TSO; 526 features = NETIF_F_TSO;
508 if (efx->type->offload_features & NETIF_F_V6_CSUM) 527 if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
519static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 538static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
520{ 539{
521 struct efx_nic *efx = netdev_priv(net_dev); 540 struct efx_nic *efx = netdev_priv(net_dev);
522 unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; 541 u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
523 542
524 if (enable) 543 if (enable)
525 net_dev->features |= features; 544 net_dev->features |= features;
@@ -635,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
635 /* Find lowest IRQ moderation across all used TX queues */ 654 /* Find lowest IRQ moderation across all used TX queues */
636 coalesce->tx_coalesce_usecs_irq = ~((u32) 0); 655 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
637 efx_for_each_channel(channel, efx) { 656 efx_for_each_channel(channel, efx) {
638 if (!efx_channel_get_tx_queue(channel, 0)) 657 if (!efx_channel_has_tx_queues(channel))
639 continue; 658 continue;
640 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 659 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
641 if (channel->channel < efx->n_rx_channels) 660 if (channel->channel < efx->n_rx_channels)
@@ -680,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
680 699
681 /* If the channel is shared only allow RX parameters to be set */ 700 /* If the channel is shared only allow RX parameters to be set */
682 efx_for_each_channel(channel, efx) { 701 efx_for_each_channel(channel, efx) {
683 if (efx_channel_get_rx_queue(channel) && 702 if (efx_channel_has_rx_queue(channel) &&
684 efx_channel_get_tx_queue(channel, 0) && 703 efx_channel_has_tx_queues(channel) &&
685 tx_usecs) { 704 tx_usecs) {
686 netif_err(efx, drv, efx->net_dev, "Channel is shared. " 705 netif_err(efx, drv, efx->net_dev, "Channel is shared. "
687 "Only RX coalescing may be set\n"); 706 "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 61ddd2c6e75..734fcfb52e8 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -1478,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
1478 /* RX control FIFO thresholds (32 entries) */ 1478 /* RX control FIFO thresholds (32 entries) */
1479 const unsigned ctrl_xon_thr = 20; 1479 const unsigned ctrl_xon_thr = 20;
1480 const unsigned ctrl_xoff_thr = 25; 1480 const unsigned ctrl_xoff_thr = 25;
1481 /* RX data FIFO thresholds (256-byte units; size varies) */
1482 int data_xon_thr = efx_nic_rx_xon_thresh >> 8;
1483 int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8;
1484 efx_oword_t reg; 1481 efx_oword_t reg;
1485 1482
1486 efx_reado(efx, &reg, FR_AZ_RX_CFG); 1483 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1487 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { 1484 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
1488 /* Data FIFO size is 5.5K */ 1485 /* Data FIFO size is 5.5K */
1489 if (data_xon_thr < 0)
1490 data_xon_thr = 512 >> 8;
1491 if (data_xoff_thr < 0)
1492 data_xoff_thr = 2048 >> 8;
1493 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); 1486 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
1494 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, 1487 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
1495 huge_buf_size); 1488 huge_buf_size);
1496 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr); 1489 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
1497 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr); 1490 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
1498 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); 1491 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
1499 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); 1492 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
1500 } else { 1493 } else {
1501 /* Data FIFO size is 80K; register fields moved */ 1494 /* Data FIFO size is 80K; register fields moved */
1502 if (data_xon_thr < 0)
1503 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
1504 if (data_xoff_thr < 0)
1505 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
1506 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); 1495 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
1507 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, 1496 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
1508 huge_buf_size); 1497 huge_buf_size);
1509 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr); 1498 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
1510 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr); 1499 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
1500 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
1511 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); 1501 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
1512 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); 1502 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
1513 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); 1503 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
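Worked example (not part of the patch): falcon_init_rx_cfg() above drops the rx_xon/xoff_thresh_bytes module parameters and hard-codes the B0+ data-FIFO thresholds at roughly 3 × max MTU from empty and full. Assuming a ~9216-byte maximum frame and the 80K data FIFO mentioned in the hunk, the arithmetic below reproduces the 27648 and 54272 byte values; the register fields count 256-byte units, hence the `>> 8`.

#include <stdio.h>

int main(void)
{
	/* Assumptions for illustration only: ~9216-byte maximum frame and
	 * an 80K data FIFO, as described in the hunk's comments. */
	const unsigned max_frame  = 9216;
	const unsigned fifo_bytes = 80 * 1024;

	unsigned xon_bytes  = 3 * max_frame;			/* 27648 */
	unsigned xoff_bytes = fifo_bytes - 3 * max_frame;	/* 54272 */

	printf("XON_MAC_TH  = %u (256-byte units of %u bytes)\n",
	       xon_bytes >> 8, xon_bytes);
	printf("XOFF_MAC_TH = %u (256-byte units of %u bytes)\n",
	       xoff_bytes >> 8, xoff_bytes);
	return 0;
}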
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 2dd16f0b3ce..b9cc846811d 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b49e8439464..2c9ee5db3bf 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index d4722c41c4c..95a980fd63d 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/in.h> 10#include <linux/in.h>
11#include <net/ip.h>
11#include "efx.h" 12#include "efx.h"
12#include "filter.h" 13#include "filter.h"
13#include "io.h" 14#include "io.h"
@@ -27,6 +28,10 @@
27 */ 28 */
28#define FILTER_CTL_SRCH_MAX 200 29#define FILTER_CTL_SRCH_MAX 200
29 30
31/* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33#define FILTER_CTL_SRCH_HINT_MAX 5
34
30enum efx_filter_table_id { 35enum efx_filter_table_id {
31 EFX_FILTER_TABLE_RX_IP = 0, 36 EFX_FILTER_TABLE_RX_IP = 0,
32 EFX_FILTER_TABLE_RX_MAC, 37 EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@ struct efx_filter_table {
47struct efx_filter_state { 52struct efx_filter_state {
48 spinlock_t lock; 53 spinlock_t lock;
49 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; 54 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
55#ifdef CONFIG_RFS_ACCEL
56 u32 *rps_flow_id;
57 unsigned rps_expire_index;
58#endif
50}; 59};
51 60
52/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 61/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table,
325 struct efx_filter_spec *spec, u32 key, 334 struct efx_filter_spec *spec, u32 key,
326 bool for_insert, int *depth_required) 335 bool for_insert, int *depth_required)
327{ 336{
328 unsigned hash, incr, filter_idx, depth; 337 unsigned hash, incr, filter_idx, depth, depth_max;
329 struct efx_filter_spec *cmp; 338 struct efx_filter_spec *cmp;
330 339
331 hash = efx_filter_hash(key); 340 hash = efx_filter_hash(key);
332 incr = efx_filter_increment(key); 341 incr = efx_filter_increment(key);
342 depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
343 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
333 344
334 for (depth = 1, filter_idx = hash & (table->size - 1); 345 for (depth = 1, filter_idx = hash & (table->size - 1);
335 depth <= FILTER_CTL_SRCH_MAX && 346 depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
336 test_bit(filter_idx, table->used_bitmap);
337 ++depth) { 347 ++depth) {
338 cmp = &table->spec[filter_idx]; 348 cmp = &table->spec[filter_idx];
339 if (efx_filter_equal(spec, cmp)) 349 if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table,
342 } 352 }
343 if (!for_insert) 353 if (!for_insert)
344 return -ENOENT; 354 return -ENOENT;
345 if (depth > FILTER_CTL_SRCH_MAX) 355 if (depth > depth_max)
346 return -EBUSY; 356 return -EBUSY;
347found: 357found:
348 *depth_required = depth; 358 *depth_required = depth;
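Illustrative sketch (not part of the patch): the hunk above caps the open-addressed probe at FILTER_CTL_SRCH_HINT_MAX (5) steps for hint-priority filters while explicit filters still search up to 200 deep, so accelerated-RFS hints fail fast with -EBUSY instead of churning a busy table. The toy insert below applies a priority-dependent probe budget in the same way; the table size, hash and increment are invented, not the driver's LFSR.

#include <stdbool.h>
#include <stdio.h>

#define TOY_TABLE_SIZE 8	/* power of two, like the HW filter tables */
#define SRCH_MAX       200	/* explicit filters try hard */
#define SRCH_HINT_MAX  5	/* performance hints give up quickly */

struct toy_slot {
	bool used;
	unsigned key;
};

/* Toy insert with a priority-dependent probe cap, mirroring how depth_max
 * is chosen from the filter priority above. Returns the slot index, or -1
 * when the probe budget runs out (-EBUSY in the driver). */
static int toy_insert(struct toy_slot *table, unsigned key, bool is_hint)
{
	unsigned depth_max = is_hint ? SRCH_HINT_MAX : SRCH_MAX;
	unsigned idx = key & (TOY_TABLE_SIZE - 1);
	unsigned incr = (key >> 3) | 1;	/* odd step, stand-in for the LFSR */

	for (unsigned depth = 1; depth <= depth_max; depth++) {
		if (!table[idx].used || table[idx].key == key) {
			table[idx].used = true;
			table[idx].key = key;
			return (int)idx;
		}
		idx = (idx + incr) & (TOY_TABLE_SIZE - 1);
	}
	return -1;
}

int main(void)
{
	struct toy_slot table[TOY_TABLE_SIZE] = {{ false, 0 }};

	for (unsigned key = 0; key < 12; key++)
		printf("hint key %u -> slot %d\n",
		       key, toy_insert(table, key * 2654435761u, true));
	return 0;
}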
@@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx)
562 spin_lock_init(&state->lock); 572 spin_lock_init(&state->lock);
563 573
564 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 574 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
575#ifdef CONFIG_RFS_ACCEL
576 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
577 sizeof(*state->rps_flow_id),
578 GFP_KERNEL);
579 if (!state->rps_flow_id)
580 goto fail;
581#endif
565 table = &state->table[EFX_FILTER_TABLE_RX_IP]; 582 table = &state->table[EFX_FILTER_TABLE_RX_IP];
566 table->id = EFX_FILTER_TABLE_RX_IP; 583 table->id = EFX_FILTER_TABLE_RX_IP;
567 table->offset = FR_BZ_RX_FILTER_TBL0; 584 table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx)
607 kfree(state->table[table_id].used_bitmap); 624 kfree(state->table[table_id].used_bitmap);
608 vfree(state->table[table_id].spec); 625 vfree(state->table[table_id].spec);
609 } 626 }
627#ifdef CONFIG_RFS_ACCEL
628 kfree(state->rps_flow_id);
629#endif
610 kfree(state); 630 kfree(state);
611} 631}
632
633#ifdef CONFIG_RFS_ACCEL
634
635int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
636 u16 rxq_index, u32 flow_id)
637{
638 struct efx_nic *efx = netdev_priv(net_dev);
639 struct efx_channel *channel;
640 struct efx_filter_state *state = efx->filter_state;
641 struct efx_filter_spec spec;
642 const struct iphdr *ip;
643 const __be16 *ports;
644 int nhoff;
645 int rc;
646
647 nhoff = skb_network_offset(skb);
648
649 if (skb->protocol != htons(ETH_P_IP))
650 return -EPROTONOSUPPORT;
651
652 /* RFS must validate the IP header length before calling us */
653 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
654 ip = (const struct iphdr *)(skb->data + nhoff);
655 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
656 return -EPROTONOSUPPORT;
657 EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
658 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
659
660 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
661 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
662 ip->daddr, ports[1], ip->saddr, ports[0]);
663 if (rc)
664 return rc;
665
666 rc = efx_filter_insert_filter(efx, &spec, true);
667 if (rc < 0)
668 return rc;
669
670 /* Remember this so we can check whether to expire the filter later */
671 state->rps_flow_id[rc] = flow_id;
672 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
673 ++channel->rfs_filters_added;
674
675 netif_info(efx, rx_status, efx->net_dev,
676 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
677 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
678 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
679 rxq_index, flow_id, rc);
680
681 return rc;
682}
683
684bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
685{
686 struct efx_filter_state *state = efx->filter_state;
687 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
688 unsigned mask = table->size - 1;
689 unsigned index;
690 unsigned stop;
691
692 if (!spin_trylock_bh(&state->lock))
693 return false;
694
695 index = state->rps_expire_index;
696 stop = (index + quota) & mask;
697
698 while (index != stop) {
699 if (test_bit(index, table->used_bitmap) &&
700 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
701 rps_may_expire_flow(efx->net_dev,
702 table->spec[index].dmaq_id,
703 state->rps_flow_id[index], index)) {
704 netif_info(efx, rx_status, efx->net_dev,
705 "expiring filter %d [flow %u]\n",
706 index, state->rps_flow_id[index]);
707 efx_filter_table_clear_entry(efx, table, index);
708 }
709 index = (index + 1) & mask;
710 }
711
712 state->rps_expire_index = stop;
713 if (table->used == 0)
714 efx_filter_table_reset_search_depth(table);
715
716 spin_unlock_bh(&state->lock);
717 return true;
718}
719
720#endif /* CONFIG_RFS_ACCEL */
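Illustrative sketch (not part of the patch): efx_filter_rfs() above pulls the IPv4 source/destination addresses and the two ports straight out of the skb at the network offset, refuses fragments, and then inserts a hint-priority full-match filter whose table index is remembered in rps_flow_id for later expiry. The standalone program below extracts the same 4-tuple from a hand-built frame in user space; the toy_iphdr layout and sample packet are stand-ins for the kernel structures.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Just the IPv4 fields the steering hook actually reads. */
struct toy_iphdr {
	uint8_t  ver_ihl;
	uint8_t  tos;
	uint16_t tot_len;
	uint16_t id;
	uint16_t frag_off;
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
	uint32_t saddr;
	uint32_t daddr;
};

#define TOY_IP_MF     0x2000
#define TOY_IP_OFFSET 0x1fff

int main(void)
{
	/* Hand-built frame: 14-byte Ethernet header, then IPv4 + ports.
	 * TCP 10.0.0.1:1234 -> 10.0.0.2:80, not fragmented. */
	uint8_t frame[64] = { 0 };
	const unsigned nhoff = 14;	/* skb_network_offset() stand-in */
	struct toy_iphdr ip = {
		.ver_ihl  = 0x45,	/* IPv4, ihl = 5 (20-byte header) */
		.protocol = 6,		/* IPPROTO_TCP */
		.saddr    = htonl(0x0a000001),
		.daddr    = htonl(0x0a000002),
	};
	uint16_t ports[2] = { htons(1234), htons(80) };

	memcpy(frame + nhoff, &ip, sizeof(ip));
	memcpy(frame + nhoff + 4 * (ip.ver_ihl & 0xf), ports, sizeof(ports));

	/* Re-read the header the way efx_filter_rfs() does: reject fragments,
	 * then take the two ports right after the (ihl * 4)-byte IP header. */
	struct toy_iphdr hdr;
	uint16_t pp[2];

	memcpy(&hdr, frame + nhoff, sizeof(hdr));
	if (ntohs(hdr.frag_off) & (TOY_IP_MF | TOY_IP_OFFSET)) {
		puts("fragment: cannot steer on the 4-tuple");
		return 0;
	}
	memcpy(pp, frame + nhoff + 4 * (hdr.ver_ihl & 0xf), sizeof(pp));

	uint32_t s = ntohl(hdr.saddr), d = ntohl(hdr.daddr);
	printf("steer %s %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n",
	       hdr.protocol == 6 ? "TCP" : "UDP",
	       s >> 24, (s >> 16) & 0xff, (s >> 8) & 0xff, s & 0xff,
	       ntohs(pp[0]),
	       d >> 24, (d >> 16) & 0xff, (d >> 8) & 0xff, d & 0xff,
	       ntohs(pp[1]));
	return 0;
}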
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 6da4ae20a03..dc45110b245 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index b716e827b29..8bba8955f31 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -602,7 +602,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
602 ************************************************************************** 602 **************************************************************************
603 */ 603 */
604 604
605int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) 605void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
606{ 606{
607 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; 607 u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)];
608 size_t outlength; 608 size_t outlength;
@@ -616,29 +616,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
616 if (rc) 616 if (rc)
617 goto fail; 617 goto fail;
618 618
619 if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) {
620 *version = 0;
621 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
622 return 0;
623 }
624
625 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 619 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
626 rc = -EIO; 620 rc = -EIO;
627 goto fail; 621 goto fail;
628 } 622 }
629 623
630 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); 624 ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
631 *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | 625 snprintf(buf, len, "%u.%u.%u.%u",
632 ((u64)le16_to_cpu(ver_words[1]) << 32) | 626 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
633 ((u64)le16_to_cpu(ver_words[2]) << 16) | 627 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
634 le16_to_cpu(ver_words[3])); 628 return;
635 *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE);
636
637 return 0;
638 629
639fail: 630fail:
640 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 631 netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
641 return rc; 632 buf[0] = 0;
642} 633}
643 634
644int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 635int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
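Illustrative sketch (not part of the patch): efx_mcdi_fwver() used to hand back a packed u64 plus a build number; the replacement efx_mcdi_print_fwver() formats the four little-endian 16-bit words of the MCDI GET_VERSION response directly into the "a.b.c.d" string that ethtool reports. The standalone program below does the same conversion; the sample payload bytes are invented.

#include <stdint.h>
#include <stdio.h>

/* Portable LE16 read, standing in for le16_to_cpu(). */
static uint16_t toy_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Pretend GET_VERSION payload: version words 3.1.0.1000,
	 * little-endian, as they would sit in the response buffer. */
	const uint8_t ver_words[8] = {
		0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0xe8, 0x03
	};
	char buf[32];

	snprintf(buf, sizeof(buf), "%u.%u.%u.%u",
		 toy_le16(ver_words + 0), toy_le16(ver_words + 2),
		 toy_le16(ver_words + 4), toy_le16(ver_words + 6));
	printf("fw_version = %s\n", buf);	/* fw_version = 3.1.0.1000 */
	return 0;
}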
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index c792f1d65e4..aced2a7856f 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2008-2009 Solarflare Communications Inc. 3 * Copyright 2008-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel,
93#define MCDI_EVENT_FIELD(_ev, _field) \ 93#define MCDI_EVENT_FIELD(_ev, _field) \
94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) 94 EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
95 95
96extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); 96extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 97extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
98 bool *was_attached_out); 98 bool *was_attached_out);
99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, 99extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index f88f4bf986f..33f7294edb4 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 90359e64400..b86a15f221a 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0e97eed663c..ec3f740f546 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2009 Solarflare Communications Inc. 3 * Copyright 2009-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 56b0266b441..19e68c26d10 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -51,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
51 return spins ? spins : -ETIMEDOUT; 51 return spins ? spins : -ETIMEDOUT;
52} 52}
53 53
54static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) 54static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
55{ 55{
56 int status; 56 int status;
57 57
58 if (LOOPBACK_INTERNAL(efx))
59 return 0;
60
61 if (mmd != MDIO_MMD_AN) { 58 if (mmd != MDIO_MMD_AN) {
62 /* Read MMD STATUS2 to check it is responding. */ 59 /* Read MMD STATUS2 to check it is responding. */
63 status = efx_mdio_read(efx, mmd, MDIO_STAT2); 60 status = efx_mdio_read(efx, mmd, MDIO_STAT2);
@@ -68,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
68 } 65 }
69 } 66 }
70 67
71 /* Read MMD STATUS 1 to check for fault. */
72 status = efx_mdio_read(efx, mmd, MDIO_STAT1);
73 if (status & MDIO_STAT1_FAULT) {
74 if (fault_fatal) {
75 netif_err(efx, hw, efx->net_dev,
76 "PHY MMD %d reporting fatal"
77 " fault: status %x\n", mmd, status);
78 return -EIO;
79 } else {
80 netif_dbg(efx, hw, efx->net_dev,
81 "PHY MMD %d reporting status"
82 " %x (expected)\n", mmd, status);
83 }
84 }
85 return 0; 68 return 0;
86} 69}
87 70
@@ -130,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
130 return rc; 113 return rc;
131} 114}
132 115
133int efx_mdio_check_mmds(struct efx_nic *efx, 116int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
134 unsigned int mmd_mask, unsigned int fatal_mask)
135{ 117{
136 int mmd = 0, probe_mmd, devs1, devs2; 118 int mmd = 0, probe_mmd, devs1, devs2;
137 u32 devices; 119 u32 devices;
@@ -161,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
161 143
162 /* Check all required MMDs are responding and happy. */ 144 /* Check all required MMDs are responding and happy. */
163 while (mmd_mask) { 145 while (mmd_mask) {
164 if (mmd_mask & 1) { 146 if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
165 int fault_fatal = fatal_mask & 1; 147 return -EIO;
166 if (efx_mdio_check_mmd(efx, mmd, fault_fatal))
167 return -EIO;
168 }
169 mmd_mask = mmd_mask >> 1; 148 mmd_mask = mmd_mask >> 1;
170 fatal_mask = fatal_mask >> 1;
171 mmd++; 149 mmd++;
172 } 150 }
173 151
@@ -337,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
337 "no MDIO PHY present with ID %d\n", efx->mdio.prtad); 315 "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
338 rc = -EINVAL; 316 rc = -EINVAL;
339 } else { 317 } else {
340 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); 318 rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
341 } 319 }
342 320
343 mutex_unlock(&efx->mac_lock); 321 mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 75791d3d496..df0703940c8 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
68 int spins, int spintime); 68 int spins, int spintime);
69 69
70/* As efx_mdio_check_mmd but for multiple MMDs */ 70/* As efx_mdio_check_mmd but for multiple MMDs */
71int efx_mdio_check_mmds(struct efx_nic *efx, 71int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
72 unsigned int mmd_mask, unsigned int fatal_mask);
73 72
74/* Check the link status of specified mmds in bit mask */ 73/* Check the link status of specified mmds in bit mask */
75extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); 74extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
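Illustrative sketch (not part of the patch): with the fatal_mask plumbing gone, efx_mdio_check_mmds() above simply walks the set bits of mmd_mask and returns -EIO for the first MMD that is not responding. The toy loop below has the same shape; the MMD numbers in the masks follow clause 45 (1 = PMA/PMD, 3 = PCS, 4 = PHY XS) and the failing device is invented.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for efx_mdio_check_mmd(): pretend MMD 3 (PCS) is not responding. */
static bool toy_mmd_ok(int mmd)
{
	return mmd != 3;
}

/* Same loop shape as the simplified efx_mdio_check_mmds(): shift the mask
 * down one device at a time and bail out on the first bad MMD. */
static int toy_check_mmds(unsigned int mmd_mask)
{
	int mmd = 0;

	while (mmd_mask) {
		if ((mmd_mask & 1) && !toy_mmd_ok(mmd))
			return -1;	/* -EIO in the driver */
		mmd_mask >>= 1;
		mmd++;
	}
	return 0;
}

int main(void)
{
	printf("PMA+PCS+PHYXS mask 0x1a -> %d\n", toy_check_mmds(0x1a));
	printf("PMA only      mask 0x02 -> %d\n", toy_check_mmds(0x02));
	return 0;
}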
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index d38627448c2..e646bfce2d8 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df8665256..215d5c51bfa 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -41,7 +41,7 @@
41 * 41 *
42 **************************************************************************/ 42 **************************************************************************/
43 43
44#define EFX_DRIVER_VERSION "3.0" 44#define EFX_DRIVER_VERSION "3.1"
45 45
46#ifdef EFX_ENABLE_DEBUG 46#ifdef EFX_ENABLE_DEBUG
47#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 47#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -63,10 +63,12 @@
63/* Checksum generation is a per-queue option in hardware, so each 63/* Checksum generation is a per-queue option in hardware, so each
64 * queue visible to the networking core is backed by two hardware TX 64 * queue visible to the networking core is backed by two hardware TX
65 * queues. */ 65 * queues. */
66#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS 66#define EFX_MAX_TX_TC 2
67#define EFX_TXQ_TYPE_OFFLOAD 1 67#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
68#define EFX_TXQ_TYPES 2 68#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
69#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) 69#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
70#define EFX_TXQ_TYPES 4
71#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
70 72
71/** 73/**
72 * struct efx_special_buffer - An Efx special buffer 74 * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
140 * @buffer: The software buffer ring 142 * @buffer: The software buffer ring
141 * @txd: The hardware descriptor ring 143 * @txd: The hardware descriptor ring
142 * @ptr_mask: The size of the ring minus 1. 144 * @ptr_mask: The size of the ring minus 1.
145 * @initialised: Has hardware queue been initialised?
143 * @flushed: Used when handling queue flushing 146 * @flushed: Used when handling queue flushing
144 * @read_count: Current read pointer. 147 * @read_count: Current read pointer.
145 * This is the number of buffers that have been removed from both rings. 148 * This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
182 struct efx_tx_buffer *buffer; 185 struct efx_tx_buffer *buffer;
183 struct efx_special_buffer txd; 186 struct efx_special_buffer txd;
184 unsigned int ptr_mask; 187 unsigned int ptr_mask;
188 bool initialised;
185 enum efx_flush_state flushed; 189 enum efx_flush_state flushed;
186 190
187 /* Members used mainly on the completion path */ 191 /* Members used mainly on the completion path */
@@ -210,15 +214,17 @@ struct efx_tx_queue {
210 * If both this and page are %NULL, the buffer slot is currently free. 214 * If both this and page are %NULL, the buffer slot is currently free.
211 * @page: The associated page buffer, if any. 215 * @page: The associated page buffer, if any.
212 * If both this and skb are %NULL, the buffer slot is currently free. 216 * If both this and skb are %NULL, the buffer slot is currently free.
213 * @data: Pointer to ethernet header
214 * @len: Buffer length, in bytes. 217 * @len: Buffer length, in bytes.
218 * @is_page: Indicates if @page is valid. If false, @skb is valid.
215 */ 219 */
216struct efx_rx_buffer { 220struct efx_rx_buffer {
217 dma_addr_t dma_addr; 221 dma_addr_t dma_addr;
218 struct sk_buff *skb; 222 union {
219 struct page *page; 223 struct sk_buff *skb;
220 char *data; 224 struct page *page;
225 } u;
221 unsigned int len; 226 unsigned int len;
227 bool is_page;
222}; 228};
223 229
224/** 230/**
@@ -358,6 +364,9 @@ struct efx_channel {
358 364
359 unsigned int irq_count; 365 unsigned int irq_count;
360 unsigned int irq_mod_score; 366 unsigned int irq_mod_score;
367#ifdef CONFIG_RFS_ACCEL
368 unsigned int rfs_filters_added;
369#endif
361 370
362 int rx_alloc_level; 371 int rx_alloc_level;
363 int rx_alloc_push_pages; 372 int rx_alloc_push_pages;
@@ -377,7 +386,7 @@ struct efx_channel {
377 bool rx_pkt_csummed; 386 bool rx_pkt_csummed;
378 387
379 struct efx_rx_queue rx_queue; 388 struct efx_rx_queue rx_queue;
380 struct efx_tx_queue tx_queue[2]; 389 struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
381}; 390};
382 391
383enum efx_led_mode { 392enum efx_led_mode {
@@ -906,7 +915,7 @@ struct efx_nic_type {
906 unsigned int phys_addr_channels; 915 unsigned int phys_addr_channels;
907 unsigned int tx_dc_base; 916 unsigned int tx_dc_base;
908 unsigned int rx_dc_base; 917 unsigned int rx_dc_base;
909 unsigned long offload_features; 918 u32 offload_features;
910 u32 reset_world_flags; 919 u32 reset_world_flags;
911}; 920};
912 921
@@ -938,18 +947,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
938 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; 947 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
939} 948}
940 949
950static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
951{
952 return channel->channel - channel->efx->tx_channel_offset <
953 channel->efx->n_tx_channels;
954}
955
941static inline struct efx_tx_queue * 956static inline struct efx_tx_queue *
942efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) 957efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
943{ 958{
944 struct efx_tx_queue *tx_queue = channel->tx_queue; 959 EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
945 EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES); 960 type >= EFX_TXQ_TYPES);
946 return tx_queue->channel ? tx_queue + type : NULL; 961 return &channel->tx_queue[type];
962}
963
964static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
965{
966 return !(tx_queue->efx->net_dev->num_tc < 2 &&
967 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
947} 968}
948 969
949/* Iterate over all TX queues belonging to a channel */ 970/* Iterate over all TX queues belonging to a channel */
950#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 971#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
951 for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \ 972 if (!efx_channel_has_tx_queues(_channel)) \
952 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 973 ; \
974 else \
975 for (_tx_queue = (_channel)->tx_queue; \
976 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
977 efx_tx_queue_used(_tx_queue); \
978 _tx_queue++)
979
980/* Iterate over all possible TX queues belonging to a channel */
981#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
982 for (_tx_queue = (_channel)->tx_queue; \
983 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
953 _tx_queue++) 984 _tx_queue++)
954 985
955static inline struct efx_rx_queue * 986static inline struct efx_rx_queue *
@@ -959,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
959 return &efx->channel[index]->rx_queue; 990 return &efx->channel[index]->rx_queue;
960} 991}
961 992
993static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
994{
995 return channel->channel < channel->efx->n_rx_channels;
996}
997
962static inline struct efx_rx_queue * 998static inline struct efx_rx_queue *
963efx_channel_get_rx_queue(struct efx_channel *channel) 999efx_channel_get_rx_queue(struct efx_channel *channel)
964{ 1000{
965 return channel->channel < channel->efx->n_rx_channels ? 1001 EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
966 &channel->rx_queue : NULL; 1002 return &channel->rx_queue;
967} 1003}
968 1004
969/* Iterate over all RX queues belonging to a channel */ 1005/* Iterate over all RX queues belonging to a channel */
970#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 1006#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
971 for (_rx_queue = efx_channel_get_rx_queue(channel); \ 1007 if (!efx_channel_has_rx_queue(_channel)) \
972 _rx_queue; \ 1008 ; \
973 _rx_queue = NULL) 1009 else \
1010 for (_rx_queue = &(_channel)->rx_queue; \
1011 _rx_queue; \
1012 _rx_queue = NULL)
974 1013
975static inline struct efx_channel * 1014static inline struct efx_channel *
976efx_rx_queue_channel(struct efx_rx_queue *rx_queue) 1015efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
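Illustrative sketch (not part of the patch): with EFX_TXQ_TYPE_HIGHPRI added, the two flag bits give four TX queue types per channel, and efx_tx_queue_used() above hides the high-priority pair until the net device has been given a second traffic class (efx_setup_tc). The program below enumerates which queue types count as in use for num_tc of 1 and 2; the names and toy helpers are invented stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define TOY_TXQ_TYPE_OFFLOAD 1	/* flag: checksum-offload queue */
#define TOY_TXQ_TYPE_HIGHPRI 2	/* flag: high-priority (TC 1) queue */
#define TOY_TXQ_TYPES        4	/* all combinations of the two flags */

/* Mirrors efx_tx_queue_used(): the high-priority pair only exists once the
 * device has at least two traffic classes configured. */
static bool toy_txq_used(unsigned queue, int num_tc)
{
	return !(num_tc < 2 && (queue & TOY_TXQ_TYPE_HIGHPRI));
}

int main(void)
{
	static const char *names[TOY_TXQ_TYPES] = {
		"no-csum", "csum", "no-csum/highpri", "csum/highpri"
	};

	for (int num_tc = 1; num_tc <= 2; num_tc++) {
		printf("num_tc=%d:", num_tc);
		for (unsigned q = 0; q < TOY_TXQ_TYPES; q++)
			if (toy_txq_used(q, num_tc))
				printf(" %s", names[q]);
		printf("\n");
	}
	return 0;
}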
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da386599ab6..e8396614daf 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -41,26 +41,6 @@
41#define RX_DC_ENTRIES 64 41#define RX_DC_ENTRIES 64
42#define RX_DC_ENTRIES_ORDER 3 42#define RX_DC_ENTRIES_ORDER 3
43 43
44/* RX FIFO XOFF watermark
45 *
46 * When the amount of the RX FIFO increases used increases past this
47 * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
48 * This also has an effect on RX/TX arbitration
49 */
50int efx_nic_rx_xoff_thresh = -1;
51module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
52MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
53
54/* RX FIFO XON watermark
55 *
56 * When the amount of the RX FIFO used decreases below this
57 * watermark send XON. Only used if TX flow control is enabled (ethtool -A)
58 * This also has an effect on RX/TX arbitration
59 */
60int efx_nic_rx_xon_thresh = -1;
61module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
62MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
63
64/* If EFX_MAX_INT_ERRORS internal errors occur within 44/* If EFX_MAX_INT_ERRORS internal errors occur within
65 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 45 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
66 * disable it. 46 * disable it.
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
445 425
446void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 426void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
447{ 427{
448 efx_oword_t tx_desc_ptr;
449 struct efx_nic *efx = tx_queue->efx; 428 struct efx_nic *efx = tx_queue->efx;
429 efx_oword_t reg;
450 430
451 tx_queue->flushed = FLUSH_NONE; 431 tx_queue->flushed = FLUSH_NONE;
452 432
@@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
454 efx_init_special_buffer(efx, &tx_queue->txd); 434 efx_init_special_buffer(efx, &tx_queue->txd);
455 435
456 /* Push TX descriptor ring to card */ 436 /* Push TX descriptor ring to card */
457 EFX_POPULATE_OWORD_10(tx_desc_ptr, 437 EFX_POPULATE_OWORD_10(reg,
458 FRF_AZ_TX_DESCQ_EN, 1, 438 FRF_AZ_TX_DESCQ_EN, 1,
459 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 439 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
460 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 440 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
470 450
471 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 451 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
472 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 452 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
473 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 453 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
474 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, 454 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
475 !csum); 455 !csum);
476 } 456 }
477 457
478 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 458 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
479 tx_queue->queue); 459 tx_queue->queue);
480 460
481 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 461 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
482 efx_oword_t reg;
483
484 /* Only 128 bits in this register */ 462 /* Only 128 bits in this register */
485 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 463 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
486 464
@@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
491 set_bit_le(tx_queue->queue, (void *)&reg); 469 set_bit_le(tx_queue->queue, (void *)&reg);
492 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); 470 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
493 } 471 }
472
473 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
474 EFX_POPULATE_OWORD_1(reg,
475 FRF_BZ_TX_PACE,
476 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
477 FFE_BZ_TX_PACE_OFF :
478 FFE_BZ_TX_PACE_RESERVED);
479 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
480 tx_queue->queue);
481 }
494} 482}
495 483
496static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 484static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1238 1226
1239 /* Flush all tx queues in parallel */ 1227 /* Flush all tx queues in parallel */
1240 efx_for_each_channel(channel, efx) { 1228 efx_for_each_channel(channel, efx) {
1241 efx_for_each_channel_tx_queue(tx_queue, channel) 1229 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1242 efx_flush_tx_queue(tx_queue); 1230 if (tx_queue->initialised)
1231 efx_flush_tx_queue(tx_queue);
1232 }
1243 } 1233 }
1244 1234
1245 /* The hardware supports four concurrent rx flushes, each of which may 1235 /* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1262 ++rx_pending; 1252 ++rx_pending;
1263 } 1253 }
1264 } 1254 }
1265 efx_for_each_channel_tx_queue(tx_queue, channel) { 1255 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1266 if (tx_queue->flushed != FLUSH_DONE) 1256 if (tx_queue->initialised &&
1257 tx_queue->flushed != FLUSH_DONE)
1267 ++tx_pending; 1258 ++tx_pending;
1268 } 1259 }
1269 } 1260 }
@@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1278 /* Mark the queues as all flushed. We're going to return failure 1269 /* Mark the queues as all flushed. We're going to return failure
1279 * leading to a reset, or fake up success anyway */ 1270 * leading to a reset, or fake up success anyway */
1280 efx_for_each_channel(channel, efx) { 1271 efx_for_each_channel(channel, efx) {
1281 efx_for_each_channel_tx_queue(tx_queue, channel) { 1272 efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1282 if (tx_queue->flushed != FLUSH_DONE) 1273 if (tx_queue->initialised &&
1274 tx_queue->flushed != FLUSH_DONE)
1283 netif_err(efx, hw, efx->net_dev, 1275 netif_err(efx, hw, efx->net_dev,
1284 "tx queue %d flush command timed out\n", 1276 "tx queue %d flush command timed out\n",
1285 tx_queue->queue); 1277 tx_queue->queue);
@@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx)
1682 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1674 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1683 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1675 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1684 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1676 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1677
1678 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1679 EFX_POPULATE_OWORD_4(temp,
1680 /* Default values */
1681 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1682 FRF_BZ_TX_PACE_SB_AF, 0xb,
1683 FRF_BZ_TX_PACE_FB_BASE, 0,
1684 /* Allow large pace values in the
1685 * fast bin. */
1686 FRF_BZ_TX_PACE_BIN_TH,
1687 FFE_BZ_TX_PACE_RESERVED);
1688 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1689 }
1685} 1690}
1686 1691
1687/* Register dump */ 1692/* Register dump */
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index eb0586925b5..d9de1b647d4 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -142,20 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
142 142
143/** 143/**
144 * struct siena_nic_data - Siena NIC state 144 * struct siena_nic_data - Siena NIC state
145 * @fw_version: Management controller firmware version
146 * @fw_build: Firmware build number
147 * @mcdi: Management-Controller-to-Driver Interface 145 * @mcdi: Management-Controller-to-Driver Interface
148 * @wol_filter_id: Wake-on-LAN packet filter id 146 * @wol_filter_id: Wake-on-LAN packet filter id
149 */ 147 */
150struct siena_nic_data { 148struct siena_nic_data {
151 u64 fw_version;
152 u32 fw_build;
153 struct efx_mcdi_iface mcdi; 149 struct efx_mcdi_iface mcdi;
154 int wol_filter_id; 150 int wol_filter_id;
155}; 151};
156 152
157extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
158
159extern struct efx_nic_type falcon_a1_nic_type; 153extern struct efx_nic_type falcon_a1_nic_type;
160extern struct efx_nic_type falcon_b0_nic_type; 154extern struct efx_nic_type falcon_b0_nic_type;
161extern struct efx_nic_type siena_a0_nic_type; 155extern struct efx_nic_type siena_a0_nic_type;
@@ -194,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
194/* MAC/PHY */ 188/* MAC/PHY */
195extern void falcon_drain_tx_fifo(struct efx_nic *efx); 189extern void falcon_drain_tx_fifo(struct efx_nic *efx);
196extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 190extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
197extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
198 191
199/* Interrupts and test events */ 192/* Interrupts and test events */
200extern int efx_nic_init_interrupt(struct efx_nic *efx); 193extern int efx_nic_init_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 1dab609757f..b3b79472421 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index ea3ae008931..55f90924247 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed81c3..cc2c86b76a7 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -2907,6 +2907,12 @@
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44 2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16 2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
2909 2909
2910/* TX_PACE_TBL */
2911/* Values >20 are documented as reserved, but will result in a queue going
2912 * into the fast bin with a pace value of zero. */
2913#define FFE_BZ_TX_PACE_OFF 0
2914#define FFE_BZ_TX_PACE_RESERVED 21
2915
2910/* DRIVER_EV */ 2916/* DRIVER_EV */
2911/* Sub-fields of an RX flush completion event */ 2917/* Sub-fields of an RX flush completion event */
2912#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 2918#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 3925fd62117..c0fdb59030f 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2011 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
89 */ 89 */
90#define EFX_RXD_HEAD_ROOM 2 90#define EFX_RXD_HEAD_ROOM 2
91 91
92static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) 92/* Offset of ethernet header within page */
93static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
94 struct efx_rx_buffer *buf)
93{ 95{
94 /* Offset is always within one page, so we don't need to consider 96 /* Offset is always within one page, so we don't need to consider
95 * the page order. 97 * the page order.
96 */ 98 */
97 return (__force unsigned long) buf->data & (PAGE_SIZE - 1); 99 return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
100 efx->type->rx_buffer_hash_size);
98} 101}
99static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 102static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
100{ 103{
101 return PAGE_SIZE << efx->rx_buffer_order; 104 return PAGE_SIZE << efx->rx_buffer_order;
102} 105}
103 106
104static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf) 107static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
105{ 108{
109 if (buf->is_page)
110 return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
111 else
112 return ((u8 *)buf->u.skb->data +
113 efx->type->rx_buffer_hash_size);
114}
115
116static inline u32 efx_rx_buf_hash(const u8 *eh)
117{
118 /* The ethernet header is always directly after any hash. */
106#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 119#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
107 return __le32_to_cpup((const __le32 *)(buf->data - 4)); 120 return __le32_to_cpup((const __le32 *)(eh - 4));
108#else 121#else
109 const u8 *data = (const u8 *)(buf->data - 4); 122 const u8 *data = eh - 4;
110 return ((u32)data[0] | 123 return ((u32)data[0] |
111 (u32)data[1] << 8 | 124 (u32)data[1] << 8 |
112 (u32)data[2] << 16 | 125 (u32)data[2] << 16 |
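
The hunk above moves efx_rx_buf_hash() from reading through rx_buf->data to reading relative to a caller-supplied Ethernet-header pointer, with the 4-byte little-endian hash sitting directly in front of that header. Below is only a minimal, self-contained sketch of that byte handling; the buffer layout and names are invented for the example and are not the driver's code:

#include <stdint.h>
#include <stdio.h>

/* 'eh' points at the Ethernet header; the hash occupies the four bytes
 * just before it, stored little-endian, as in the fallback branch above. */
static uint32_t example_rx_buf_hash(const uint8_t *eh)
{
	return (uint32_t)eh[-4] |
	       (uint32_t)eh[-3] << 8 |
	       (uint32_t)eh[-2] << 16 |
	       (uint32_t)eh[-1] << 24;
}

int main(void)
{
	/* hash 0xdeadbeef laid out little-endian before a (dummy) header */
	uint8_t buf[8] = { 0xef, 0xbe, 0xad, 0xde };

	printf("hash = 0x%08x\n", (unsigned)example_rx_buf_hash(buf + 4));
	return 0;
}

On platforms with efficient unaligned access the driver instead does a single __le32 load at eh - 4; the byte-by-byte form is the portable fallback shown in the #else branch.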
@@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
129 struct efx_nic *efx = rx_queue->efx; 142 struct efx_nic *efx = rx_queue->efx;
130 struct net_device *net_dev = efx->net_dev; 143 struct net_device *net_dev = efx->net_dev;
131 struct efx_rx_buffer *rx_buf; 144 struct efx_rx_buffer *rx_buf;
145 struct sk_buff *skb;
132 int skb_len = efx->rx_buffer_len; 146 int skb_len = efx->rx_buffer_len;
133 unsigned index, count; 147 unsigned index, count;
134 148
@@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
136 index = rx_queue->added_count & rx_queue->ptr_mask; 150 index = rx_queue->added_count & rx_queue->ptr_mask;
137 rx_buf = efx_rx_buffer(rx_queue, index); 151 rx_buf = efx_rx_buffer(rx_queue, index);
138 152
139 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); 153 rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
140 if (unlikely(!rx_buf->skb)) 154 if (unlikely(!skb))
141 return -ENOMEM; 155 return -ENOMEM;
142 rx_buf->page = NULL;
143 156
144 /* Adjust the SKB for padding and checksum */ 157 /* Adjust the SKB for padding and checksum */
145 skb_reserve(rx_buf->skb, NET_IP_ALIGN); 158 skb_reserve(skb, NET_IP_ALIGN);
146 rx_buf->len = skb_len - NET_IP_ALIGN; 159 rx_buf->len = skb_len - NET_IP_ALIGN;
147 rx_buf->data = (char *)rx_buf->skb->data; 160 rx_buf->is_page = false;
148 rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; 161 skb->ip_summed = CHECKSUM_UNNECESSARY;
149 162
150 rx_buf->dma_addr = pci_map_single(efx->pci_dev, 163 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
151 rx_buf->data, rx_buf->len, 164 skb->data, rx_buf->len,
152 PCI_DMA_FROMDEVICE); 165 PCI_DMA_FROMDEVICE);
153 if (unlikely(pci_dma_mapping_error(efx->pci_dev, 166 if (unlikely(pci_dma_mapping_error(efx->pci_dev,
154 rx_buf->dma_addr))) { 167 rx_buf->dma_addr))) {
155 dev_kfree_skb_any(rx_buf->skb); 168 dev_kfree_skb_any(skb);
156 rx_buf->skb = NULL; 169 rx_buf->u.skb = NULL;
157 return -EIO; 170 return -EIO;
158 } 171 }
159 172
@@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
211 index = rx_queue->added_count & rx_queue->ptr_mask; 224 index = rx_queue->added_count & rx_queue->ptr_mask;
212 rx_buf = efx_rx_buffer(rx_queue, index); 225 rx_buf = efx_rx_buffer(rx_queue, index);
213 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 226 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
214 rx_buf->skb = NULL; 227 rx_buf->u.page = page;
215 rx_buf->page = page;
216 rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
217 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 228 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
229 rx_buf->is_page = true;
218 ++rx_queue->added_count; 230 ++rx_queue->added_count;
219 ++rx_queue->alloc_page_count; 231 ++rx_queue->alloc_page_count;
220 ++state->refcnt; 232 ++state->refcnt;
@@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
235static void efx_unmap_rx_buffer(struct efx_nic *efx, 247static void efx_unmap_rx_buffer(struct efx_nic *efx,
236 struct efx_rx_buffer *rx_buf) 248 struct efx_rx_buffer *rx_buf)
237{ 249{
238 if (rx_buf->page) { 250 if (rx_buf->is_page && rx_buf->u.page) {
239 struct efx_rx_page_state *state; 251 struct efx_rx_page_state *state;
240 252
241 EFX_BUG_ON_PARANOID(rx_buf->skb); 253 state = page_address(rx_buf->u.page);
242
243 state = page_address(rx_buf->page);
244 if (--state->refcnt == 0) { 254 if (--state->refcnt == 0) {
245 pci_unmap_page(efx->pci_dev, 255 pci_unmap_page(efx->pci_dev,
246 state->dma_addr, 256 state->dma_addr,
247 efx_rx_buf_size(efx), 257 efx_rx_buf_size(efx),
248 PCI_DMA_FROMDEVICE); 258 PCI_DMA_FROMDEVICE);
249 } 259 }
250 } else if (likely(rx_buf->skb)) { 260 } else if (!rx_buf->is_page && rx_buf->u.skb) {
251 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, 261 pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
252 rx_buf->len, PCI_DMA_FROMDEVICE); 262 rx_buf->len, PCI_DMA_FROMDEVICE);
253 } 263 }
@@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
256static void efx_free_rx_buffer(struct efx_nic *efx, 266static void efx_free_rx_buffer(struct efx_nic *efx,
257 struct efx_rx_buffer *rx_buf) 267 struct efx_rx_buffer *rx_buf)
258{ 268{
259 if (rx_buf->page) { 269 if (rx_buf->is_page && rx_buf->u.page) {
260 __free_pages(rx_buf->page, efx->rx_buffer_order); 270 __free_pages(rx_buf->u.page, efx->rx_buffer_order);
261 rx_buf->page = NULL; 271 rx_buf->u.page = NULL;
262 } else if (likely(rx_buf->skb)) { 272 } else if (!rx_buf->is_page && rx_buf->u.skb) {
263 dev_kfree_skb_any(rx_buf->skb); 273 dev_kfree_skb_any(rx_buf->u.skb);
264 rx_buf->skb = NULL; 274 rx_buf->u.skb = NULL;
265 } 275 }
266} 276}
267 277
@@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
277static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, 287static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
278 struct efx_rx_buffer *rx_buf) 288 struct efx_rx_buffer *rx_buf)
279{ 289{
280 struct efx_rx_page_state *state = page_address(rx_buf->page); 290 struct efx_rx_page_state *state = page_address(rx_buf->u.page);
281 struct efx_rx_buffer *new_buf; 291 struct efx_rx_buffer *new_buf;
282 unsigned fill_level, index; 292 unsigned fill_level, index;
283 293
@@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
292 } 302 }
293 303
294 ++state->refcnt; 304 ++state->refcnt;
295 get_page(rx_buf->page); 305 get_page(rx_buf->u.page);
296 306
297 index = rx_queue->added_count & rx_queue->ptr_mask; 307 index = rx_queue->added_count & rx_queue->ptr_mask;
298 new_buf = efx_rx_buffer(rx_queue, index); 308 new_buf = efx_rx_buffer(rx_queue, index);
299 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 309 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
300 new_buf->skb = NULL; 310 new_buf->u.page = rx_buf->u.page;
301 new_buf->page = rx_buf->page;
302 new_buf->data = (void *)
303 ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
304 new_buf->len = rx_buf->len; 311 new_buf->len = rx_buf->len;
312 new_buf->is_page = true;
305 ++rx_queue->added_count; 313 ++rx_queue->added_count;
306} 314}
307 315
@@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
315 struct efx_rx_buffer *new_buf; 323 struct efx_rx_buffer *new_buf;
316 unsigned index; 324 unsigned index;
317 325
318 if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 326 if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
319 page_count(rx_buf->page) == 1) 327 page_count(rx_buf->u.page) == 1)
320 efx_resurrect_rx_buffer(rx_queue, rx_buf); 328 efx_resurrect_rx_buffer(rx_queue, rx_buf);
321 329
322 index = rx_queue->added_count & rx_queue->ptr_mask; 330 index = rx_queue->added_count & rx_queue->ptr_mask;
323 new_buf = efx_rx_buffer(rx_queue, index); 331 new_buf = efx_rx_buffer(rx_queue, index);
324 332
325 memcpy(new_buf, rx_buf, sizeof(*new_buf)); 333 memcpy(new_buf, rx_buf, sizeof(*new_buf));
326 rx_buf->page = NULL; 334 rx_buf->u.page = NULL;
327 rx_buf->skb = NULL;
328 ++rx_queue->added_count; 335 ++rx_queue->added_count;
329} 336}
330 337
@@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
428 * data at the end of the skb will be trashed. So 435 * data at the end of the skb will be trashed. So
429 * we have no choice but to leak the fragment. 436 * we have no choice but to leak the fragment.
430 */ 437 */
431 *leak_packet = (rx_buf->skb != NULL); 438 *leak_packet = !rx_buf->is_page;
432 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 439 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
433 } else { 440 } else {
434 if (net_ratelimit()) 441 if (net_ratelimit())
@@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
448 */ 455 */
449static void efx_rx_packet_gro(struct efx_channel *channel, 456static void efx_rx_packet_gro(struct efx_channel *channel,
450 struct efx_rx_buffer *rx_buf, 457 struct efx_rx_buffer *rx_buf,
451 bool checksummed) 458 const u8 *eh, bool checksummed)
452{ 459{
453 struct napi_struct *napi = &channel->napi_str; 460 struct napi_struct *napi = &channel->napi_str;
454 gro_result_t gro_result; 461 gro_result_t gro_result;
455 462
456 /* Pass the skb/page into the GRO engine */ 463 /* Pass the skb/page into the GRO engine */
457 if (rx_buf->page) { 464 if (rx_buf->is_page) {
458 struct efx_nic *efx = channel->efx; 465 struct efx_nic *efx = channel->efx;
459 struct page *page = rx_buf->page; 466 struct page *page = rx_buf->u.page;
460 struct sk_buff *skb; 467 struct sk_buff *skb;
461 468
462 EFX_BUG_ON_PARANOID(rx_buf->skb); 469 rx_buf->u.page = NULL;
463 rx_buf->page = NULL;
464 470
465 skb = napi_get_frags(napi); 471 skb = napi_get_frags(napi);
466 if (!skb) { 472 if (!skb) {
@@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
469 } 475 }
470 476
471 if (efx->net_dev->features & NETIF_F_RXHASH) 477 if (efx->net_dev->features & NETIF_F_RXHASH)
472 skb->rxhash = efx_rx_buf_hash(rx_buf); 478 skb->rxhash = efx_rx_buf_hash(eh);
473 479
474 skb_shinfo(skb)->frags[0].page = page; 480 skb_shinfo(skb)->frags[0].page = page;
475 skb_shinfo(skb)->frags[0].page_offset = 481 skb_shinfo(skb)->frags[0].page_offset =
476 efx_rx_buf_offset(rx_buf); 482 efx_rx_buf_offset(efx, rx_buf);
477 skb_shinfo(skb)->frags[0].size = rx_buf->len; 483 skb_shinfo(skb)->frags[0].size = rx_buf->len;
478 skb_shinfo(skb)->nr_frags = 1; 484 skb_shinfo(skb)->nr_frags = 1;
479 485
@@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
487 493
488 gro_result = napi_gro_frags(napi); 494 gro_result = napi_gro_frags(napi);
489 } else { 495 } else {
490 struct sk_buff *skb = rx_buf->skb; 496 struct sk_buff *skb = rx_buf->u.skb;
491 497
492 EFX_BUG_ON_PARANOID(!skb);
493 EFX_BUG_ON_PARANOID(!checksummed); 498 EFX_BUG_ON_PARANOID(!checksummed);
494 rx_buf->skb = NULL; 499 rx_buf->u.skb = NULL;
495 500
496 gro_result = napi_gro_receive(napi, skb); 501 gro_result = napi_gro_receive(napi, skb);
497 } 502 }
@@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
513 bool leak_packet = false; 518 bool leak_packet = false;
514 519
515 rx_buf = efx_rx_buffer(rx_queue, index); 520 rx_buf = efx_rx_buffer(rx_queue, index);
516 EFX_BUG_ON_PARANOID(!rx_buf->data);
517 EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
518 EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
519 521
520 /* This allows the refill path to post another buffer. 522 /* This allows the refill path to post another buffer.
521 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 523 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
554 /* Prefetch nice and early so data will (hopefully) be in cache by 556 /* Prefetch nice and early so data will (hopefully) be in cache by
555 * the time we look at it. 557 * the time we look at it.
556 */ 558 */
557 prefetch(rx_buf->data); 559 prefetch(efx_rx_buf_eh(efx, rx_buf));
558 560
559 /* Pipeline receives so that we give time for packet headers to be 561 /* Pipeline receives so that we give time for packet headers to be
560 * prefetched into cache. 562 * prefetched into cache.
561 */ 563 */
562 rx_buf->len = len; 564 rx_buf->len = len - efx->type->rx_buffer_hash_size;
563out: 565out:
564 if (channel->rx_pkt) 566 if (channel->rx_pkt)
565 __efx_rx_packet(channel, 567 __efx_rx_packet(channel,
@@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel,
574{ 576{
575 struct efx_nic *efx = channel->efx; 577 struct efx_nic *efx = channel->efx;
576 struct sk_buff *skb; 578 struct sk_buff *skb;
577 579 u8 *eh = efx_rx_buf_eh(efx, rx_buf);
578 rx_buf->data += efx->type->rx_buffer_hash_size;
579 rx_buf->len -= efx->type->rx_buffer_hash_size;
580 580
581 /* If we're in loopback test, then pass the packet directly to the 581 /* If we're in loopback test, then pass the packet directly to the
582 * loopback layer, and free the rx_buf here 582 * loopback layer, and free the rx_buf here
583 */ 583 */
584 if (unlikely(efx->loopback_selftest)) { 584 if (unlikely(efx->loopback_selftest)) {
585 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); 585 efx_loopback_rx_packet(efx, eh, rx_buf->len);
586 efx_free_rx_buffer(efx, rx_buf); 586 efx_free_rx_buffer(efx, rx_buf);
587 return; 587 return;
588 } 588 }
589 589
590 if (rx_buf->skb) { 590 if (!rx_buf->is_page) {
591 prefetch(skb_shinfo(rx_buf->skb)); 591 skb = rx_buf->u.skb;
592 592
593 skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size); 593 prefetch(skb_shinfo(skb));
594 skb_put(rx_buf->skb, rx_buf->len); 594
595 skb_reserve(skb, efx->type->rx_buffer_hash_size);
596 skb_put(skb, rx_buf->len);
595 597
596 if (efx->net_dev->features & NETIF_F_RXHASH) 598 if (efx->net_dev->features & NETIF_F_RXHASH)
597 rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf); 599 skb->rxhash = efx_rx_buf_hash(eh);
598 600
599 /* Move past the ethernet header. rx_buf->data still points 601 /* Move past the ethernet header. rx_buf->data still points
600 * at the ethernet header */ 602 * at the ethernet header */
601 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, 603 skb->protocol = eth_type_trans(skb, efx->net_dev);
602 efx->net_dev);
603 604
604 skb_record_rx_queue(rx_buf->skb, channel->channel); 605 skb_record_rx_queue(skb, channel->channel);
605 } 606 }
606 607
607 if (likely(checksummed || rx_buf->page)) { 608 if (likely(checksummed || rx_buf->is_page)) {
608 efx_rx_packet_gro(channel, rx_buf, checksummed); 609 efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
609 return; 610 return;
610 } 611 }
611 612
612 /* We now own the SKB */ 613 /* We now own the SKB */
613 skb = rx_buf->skb; 614 skb = rx_buf->u.skb;
614 rx_buf->skb = NULL; 615 rx_buf->u.skb = NULL;
615 EFX_BUG_ON_PARANOID(!skb);
616 616
617 /* Set the SKB flags */ 617 /* Set the SKB flags */
618 skb_checksum_none_assert(skb); 618 skb_checksum_none_assert(skb);
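
Taken together, the rx.c changes above replace the separate skb and page pointers in each RX buffer with a union selected by an is_page flag, so every consumer tests one boolean instead of probing which pointer happens to be non-NULL. A rough, self-contained illustration of that shape, with plain malloc/free standing in for sk_buffs and pages (none of this is the driver's code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct example_rx_buffer {
	bool is_page;            /* selects which union member is live */
	union {
		void *skb;       /* stand-in for struct sk_buff * */
		void *page;      /* stand-in for struct page * */
	} u;
};

static void example_free_rx_buffer(struct example_rx_buffer *buf)
{
	/* mirrors the shape of the efx_free_rx_buffer() logic above */
	if (buf->is_page && buf->u.page) {
		free(buf->u.page);
		buf->u.page = NULL;
	} else if (!buf->is_page && buf->u.skb) {
		free(buf->u.skb);
		buf->u.skb = NULL;
	}
}

int main(void)
{
	struct example_rx_buffer buf = { .is_page = true };

	buf.u.page = malloc(32);
	example_free_rx_buffer(&buf);
	printf("page member after free: %p\n", buf.u.page);
	return 0;
}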
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99f129..a0f49b348d6 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
644 goto out; 644 goto out;
645 } 645 }
646 646
647 /* Test both types of TX queue */ 647 /* Test all enabled types of TX queue */
648 efx_for_each_channel_tx_queue(tx_queue, channel) { 648 efx_for_each_channel_tx_queue(tx_queue, channel) {
649 state->offload_csum = (tx_queue->queue & 649 state->offload_csum = (tx_queue->queue &
650 EFX_TXQ_TYPE_OFFLOAD); 650 EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index aed495a4dad..dba5456e70f 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index bf845617644..e4dd8986b1f 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -227,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx)
227 if (rc) 227 if (rc)
228 goto fail1; 228 goto fail1;
229 229
230 rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
231 if (rc) {
232 netif_err(efx, probe, efx->net_dev,
233 "Failed to read MCPU firmware version - rc %d\n", rc);
234 goto fail1; /* MCPU absent? */
235 }
236
237 /* Let the BMC know that the driver is now in charge of link and 230 /* Let the BMC know that the driver is now in charge of link and
238 * filter settings. We must do this before we reset the NIC */ 231 * filter settings. We must do this before we reset the NIC */
239 rc = efx_mcdi_drv_attach(efx, true, &already_attached); 232 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
@@ -348,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx)
348 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); 341 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
349 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); 342 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
350 343
351 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
352 /* No MCDI operation has been defined to set thresholds */
353 netif_err(efx, hw, efx->net_dev,
354 "ignoring RX flow control thresholds\n");
355
356 /* Enable event logging */ 344 /* Enable event logging */
357 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 345 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
358 if (rc) 346 if (rc)
@@ -514,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx)
514 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); 502 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
515} 503}
516 504
517void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len)
518{
519 struct siena_nic_data *nic_data = efx->nic_data;
520 snprintf(buf, len, "%u.%u.%u.%u",
521 (unsigned int)(nic_data->fw_version >> 48),
522 (unsigned int)(nic_data->fw_version >> 32 & 0xffff),
523 (unsigned int)(nic_data->fw_version >> 16 & 0xffff),
524 (unsigned int)(nic_data->fw_version & 0xffff));
525}
526
527/************************************************************************** 505/**************************************************************************
528 * 506 *
529 * Wake on LAN 507 * Wake on LAN
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 879b7f6bde3..71f2e3ebe1c 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd. 3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f102912eba9..efdceb35aaa 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2009 Solarflare Communications Inc. 3 * Copyright 2007-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -196,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
196 if (rc < 0) 196 if (rc < 0)
197 return rc; 197 return rc;
198 198
199 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 199 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
200 if (rc < 0) 200 if (rc < 0)
201 return rc; 201 return rc;
202 } 202 }
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da657b..13980190821 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2009 Solarflare Communications Inc. 4 * Copyright 2005-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
336{ 336{
337 struct efx_nic *efx = netdev_priv(net_dev); 337 struct efx_nic *efx = netdev_priv(net_dev);
338 struct efx_tx_queue *tx_queue; 338 struct efx_tx_queue *tx_queue;
339 unsigned index, type;
339 340
340 if (unlikely(efx->port_inhibited)) 341 if (unlikely(efx->port_inhibited))
341 return NETDEV_TX_BUSY; 342 return NETDEV_TX_BUSY;
342 343
343 tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb), 344 index = skb_get_queue_mapping(skb);
344 skb->ip_summed == CHECKSUM_PARTIAL ? 345 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
345 EFX_TXQ_TYPE_OFFLOAD : 0); 346 if (index >= efx->n_tx_channels) {
347 index -= efx->n_tx_channels;
348 type |= EFX_TXQ_TYPE_HIGHPRI;
349 }
350 tx_queue = efx_get_tx_queue(efx, index, type);
346 351
347 return efx_enqueue_skb(tx_queue, skb); 352 return efx_enqueue_skb(tx_queue, skb);
348} 353}
349 354
355void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
356{
357 struct efx_nic *efx = tx_queue->efx;
358
359 /* Must be inverse of queue lookup in efx_hard_start_xmit() */
360 tx_queue->core_txq =
361 netdev_get_tx_queue(efx->net_dev,
362 tx_queue->queue / EFX_TXQ_TYPES +
363 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
364 efx->n_tx_channels : 0));
365}
366
367int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
368{
369 struct efx_nic *efx = netdev_priv(net_dev);
370 struct efx_channel *channel;
371 struct efx_tx_queue *tx_queue;
372 unsigned tc;
373 int rc;
374
375 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
376 return -EINVAL;
377
378 if (num_tc == net_dev->num_tc)
379 return 0;
380
381 for (tc = 0; tc < num_tc; tc++) {
382 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
383 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
384 }
385
386 if (num_tc > net_dev->num_tc) {
387 /* Initialise high-priority queues as necessary */
388 efx_for_each_channel(channel, efx) {
389 efx_for_each_possible_channel_tx_queue(tx_queue,
390 channel) {
391 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
392 continue;
393 if (!tx_queue->buffer) {
394 rc = efx_probe_tx_queue(tx_queue);
395 if (rc)
396 return rc;
397 }
398 if (!tx_queue->initialised)
399 efx_init_tx_queue(tx_queue);
400 efx_init_tx_queue_core_txq(tx_queue);
401 }
402 }
403 } else {
404 /* Reduce number of classes before number of queues */
405 net_dev->num_tc = num_tc;
406 }
407
408 rc = netif_set_real_num_tx_queues(net_dev,
409 max_t(int, num_tc, 1) *
410 efx->n_tx_channels);
411 if (rc)
412 return rc;
413
414 /* Do not destroy high-priority queues when they become
415 * unused. We would have to flush them first, and it is
416 * fairly difficult to flush a subset of TX queues. Leave
417 * it to efx_fini_channels().
418 */
419
420 net_dev->num_tc = num_tc;
421 return 0;
422}
423
350void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 424void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
351{ 425{
352 unsigned fill_level; 426 unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
430 504
431 /* Set up TX descriptor ring */ 505 /* Set up TX descriptor ring */
432 efx_nic_init_tx(tx_queue); 506 efx_nic_init_tx(tx_queue);
507
508 tx_queue->initialised = true;
433} 509}
434 510
435void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 511void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
452 528
453void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) 529void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
454{ 530{
531 if (!tx_queue->initialised)
532 return;
533
455 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 534 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
456 "shutting down TX queue %d\n", tx_queue->queue); 535 "shutting down TX queue %d\n", tx_queue->queue);
457 536
537 tx_queue->initialised = false;
538
458 /* Flush TX queue, remove descriptor ring */ 539 /* Flush TX queue, remove descriptor ring */
459 efx_nic_fini_tx(tx_queue); 540 efx_nic_fini_tx(tx_queue);
460 541
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
466 547
467void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 548void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
468{ 549{
550 if (!tx_queue->buffer)
551 return;
552
469 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, 553 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
470 "destroying TX queue %d\n", tx_queue->queue); 554 "destroying TX queue %d\n", tx_queue->queue);
471 efx_nic_remove_tx(tx_queue); 555 efx_nic_remove_tx(tx_queue);
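
The tx.c changes above split the netdev core queue index into a channel index plus a queue type (checksum offload, and now high priority for efx_setup_tc()), with high-priority queues occupying core indices at and above n_tx_channels; efx_init_tx_queue_core_txq() is documented as the inverse of that lookup. The sketch below checks that round trip with placeholder constants only: EFX_TXQ_TYPES, EFX_TXQ_TYPE_* and efx->n_tx_channels are replaced by invented values, and the flat hardware queue number is assumed to be channel * EFX_TXQ_TYPES + type, which is what the division in efx_init_tx_queue_core_txq() implies.

#include <stdio.h>

#define TXQ_TYPE_OFFLOAD 1u	/* placeholder for EFX_TXQ_TYPE_OFFLOAD */
#define TXQ_TYPE_HIGHPRI 2u	/* placeholder for EFX_TXQ_TYPE_HIGHPRI */
#define TXQ_TYPES        4u	/* placeholder for EFX_TXQ_TYPES */
#define N_TX_CHANNELS    4u	/* placeholder for efx->n_tx_channels */

/* core queue index -> flat hardware queue, as in efx_hard_start_xmit() */
static unsigned core_to_hw(unsigned core, int csum_offload)
{
	unsigned type = csum_offload ? TXQ_TYPE_OFFLOAD : 0;

	if (core >= N_TX_CHANNELS) {
		core -= N_TX_CHANNELS;
		type |= TXQ_TYPE_HIGHPRI;
	}
	return core * TXQ_TYPES + type;
}

/* flat hardware queue -> core queue index, as in efx_init_tx_queue_core_txq() */
static unsigned hw_to_core(unsigned queue)
{
	return queue / TXQ_TYPES +
	       ((queue & TXQ_TYPE_HIGHPRI) ? N_TX_CHANNELS : 0);
}

int main(void)
{
	unsigned core;

	for (core = 0; core < 2 * N_TX_CHANNELS; core++) {
		unsigned hwq = core_to_hw(core, 1);

		printf("core %u -> hw %u -> core %u\n",
		       core, hwq, hw_to_core(hwq));
	}
	return 0;
}

Each line prints the same core index on both ends, which is the property the "Must be inverse of queue lookup" comment relies on.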
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
index 351794a7921..d9886addcc9 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc. 3 * Copyright 2006-2011 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx)
193 goto fail; 193 goto fail;
194 194
195 /* Check that all the MMDs we expect are present and responding. */ 195 /* Check that all the MMDs we expect are present and responding. */
196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0); 196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
197 if (rc < 0) 197 if (rc < 0)
198 goto fail; 198 goto fail;
199 199
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index e0d63083c3a..e4dd3a7f304 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2009 Solarflare Communications Inc. 3 * Copyright 2006-2010 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c1750e2a..095e5258088 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,10 +32,17 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/ethtool.h>
35#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
36 37
37#include "sh_eth.h" 38#include "sh_eth.h"
38 39
40#define SH_ETH_DEF_MSG_ENABLE \
41 (NETIF_MSG_LINK | \
42 NETIF_MSG_TIMER | \
43 NETIF_MSG_RX_ERR| \
44 NETIF_MSG_TX_ERR)
45
39/* There is CPU dependent code */ 46/* There is CPU dependent code */
40#if defined(CONFIG_CPU_SUBTYPE_SH7724) 47#if defined(CONFIG_CPU_SUBTYPE_SH7724)
41#define SH_ETH_RESET_DEFAULT 1 48#define SH_ETH_RESET_DEFAULT 1
@@ -817,6 +824,20 @@ static int sh_eth_rx(struct net_device *ndev)
817 return 0; 824 return 0;
818} 825}
819 826
827static void sh_eth_rcv_snd_disable(u32 ioaddr)
828{
829 /* disable tx and rx */
830 writel(readl(ioaddr + ECMR) &
831 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
832}
833
834static void sh_eth_rcv_snd_enable(u32 ioaddr)
835{
836 /* enable tx and rx */
837 writel(readl(ioaddr + ECMR) |
838 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
839}
840
820/* error control function */ 841/* error control function */
821static void sh_eth_error(struct net_device *ndev, int intr_status) 842static void sh_eth_error(struct net_device *ndev, int intr_status)
822{ 843{
@@ -843,11 +864,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
843 if (mdp->ether_link_active_low) 864 if (mdp->ether_link_active_low)
844 link_stat = ~link_stat; 865 link_stat = ~link_stat;
845 } 866 }
846 if (!(link_stat & PHY_ST_LINK)) { 867 if (!(link_stat & PHY_ST_LINK))
847 /* Link Down : disable tx and rx */ 868 sh_eth_rcv_snd_disable(ioaddr);
848 writel(readl(ioaddr + ECMR) & 869 else {
849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
850 } else {
851 /* Link Up */ 870 /* Link Up */
852 writel(readl(ioaddr + EESIPR) & 871 writel(readl(ioaddr + EESIPR) &
853 ~DMAC_M_ECI, ioaddr + EESIPR); 872 ~DMAC_M_ECI, ioaddr + EESIPR);
@@ -857,8 +876,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
857 writel(readl(ioaddr + EESIPR) | 876 writel(readl(ioaddr + EESIPR) |
858 DMAC_M_ECI, ioaddr + EESIPR); 877 DMAC_M_ECI, ioaddr + EESIPR);
859 /* enable tx and rx */ 878 /* enable tx and rx */
860 writel(readl(ioaddr + ECMR) | 879 sh_eth_rcv_snd_enable(ioaddr);
861 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
862 } 880 }
863 } 881 }
864 } 882 }
@@ -867,6 +885,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
867 /* Write buck end. unused write back interrupt */ 885 /* Write buck end. unused write back interrupt */
868 if (intr_status & EESR_TABT) /* Transmit Abort int */ 886 if (intr_status & EESR_TABT) /* Transmit Abort int */
869 mdp->stats.tx_aborted_errors++; 887 mdp->stats.tx_aborted_errors++;
888 if (netif_msg_tx_err(mdp))
889 dev_err(&ndev->dev, "Transmit Abort\n");
870 } 890 }
871 891
872 if (intr_status & EESR_RABT) { 892 if (intr_status & EESR_RABT) {
@@ -874,14 +894,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
874 if (intr_status & EESR_RFRMER) { 894 if (intr_status & EESR_RFRMER) {
875 /* Receive Frame Overflow int */ 895 /* Receive Frame Overflow int */
876 mdp->stats.rx_frame_errors++; 896 mdp->stats.rx_frame_errors++;
877 dev_err(&ndev->dev, "Receive Frame Overflow\n"); 897 if (netif_msg_rx_err(mdp))
898 dev_err(&ndev->dev, "Receive Abort\n");
878 } 899 }
879 } 900 }
880 901
881 if (!mdp->cd->no_ade) { 902 if (intr_status & EESR_TDE) {
882 if (intr_status & EESR_ADE && intr_status & EESR_TDE && 903 /* Transmit Descriptor Empty int */
883 intr_status & EESR_TFE) 904 mdp->stats.tx_fifo_errors++;
884 mdp->stats.tx_fifo_errors++; 905 if (netif_msg_tx_err(mdp))
906 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
907 }
908
909 if (intr_status & EESR_TFE) {
910 /* FIFO under flow */
911 mdp->stats.tx_fifo_errors++;
912 if (netif_msg_tx_err(mdp))
913 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
885 } 914 }
886 915
887 if (intr_status & EESR_RDE) { 916 if (intr_status & EESR_RDE) {
@@ -890,12 +919,22 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
890 919
891 if (readl(ioaddr + EDRRR) ^ EDRRR_R) 920 if (readl(ioaddr + EDRRR) ^ EDRRR_R)
892 writel(EDRRR_R, ioaddr + EDRRR); 921 writel(EDRRR_R, ioaddr + EDRRR);
893 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 922 if (netif_msg_rx_err(mdp))
923 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
894 } 924 }
925
895 if (intr_status & EESR_RFE) { 926 if (intr_status & EESR_RFE) {
896 /* Receive FIFO Overflow int */ 927 /* Receive FIFO Overflow int */
897 mdp->stats.rx_fifo_errors++; 928 mdp->stats.rx_fifo_errors++;
898 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 929 if (netif_msg_rx_err(mdp))
930 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
931 }
932
933 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
934 /* Address Error */
935 mdp->stats.tx_fifo_errors++;
936 if (netif_msg_tx_err(mdp))
937 dev_err(&ndev->dev, "Address Error\n");
899 } 938 }
900 939
901 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 940 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1012,7 +1051,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1012 mdp->duplex = -1; 1051 mdp->duplex = -1;
1013 } 1052 }
1014 1053
1015 if (new_state) 1054 if (new_state && netif_msg_link(mdp))
1016 phy_print_status(phydev); 1055 phy_print_status(phydev);
1017} 1056}
1018 1057
@@ -1063,6 +1102,132 @@ static int sh_eth_phy_start(struct net_device *ndev)
1063 return 0; 1102 return 0;
1064} 1103}
1065 1104
1105static int sh_eth_get_settings(struct net_device *ndev,
1106 struct ethtool_cmd *ecmd)
1107{
1108 struct sh_eth_private *mdp = netdev_priv(ndev);
1109 unsigned long flags;
1110 int ret;
1111
1112 spin_lock_irqsave(&mdp->lock, flags);
1113 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1114 spin_unlock_irqrestore(&mdp->lock, flags);
1115
1116 return ret;
1117}
1118
1119static int sh_eth_set_settings(struct net_device *ndev,
1120 struct ethtool_cmd *ecmd)
1121{
1122 struct sh_eth_private *mdp = netdev_priv(ndev);
1123 unsigned long flags;
1124 int ret;
1125 u32 ioaddr = ndev->base_addr;
1126
1127 spin_lock_irqsave(&mdp->lock, flags);
1128
1129 /* disable tx and rx */
1130 sh_eth_rcv_snd_disable(ioaddr);
1131
1132 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1133 if (ret)
1134 goto error_exit;
1135
1136 if (ecmd->duplex == DUPLEX_FULL)
1137 mdp->duplex = 1;
1138 else
1139 mdp->duplex = 0;
1140
1141 if (mdp->cd->set_duplex)
1142 mdp->cd->set_duplex(ndev);
1143
1144error_exit:
1145 mdelay(1);
1146
1147 /* enable tx and rx */
1148 sh_eth_rcv_snd_enable(ioaddr);
1149
1150 spin_unlock_irqrestore(&mdp->lock, flags);
1151
1152 return ret;
1153}
1154
1155static int sh_eth_nway_reset(struct net_device *ndev)
1156{
1157 struct sh_eth_private *mdp = netdev_priv(ndev);
1158 unsigned long flags;
1159 int ret;
1160
1161 spin_lock_irqsave(&mdp->lock, flags);
1162 ret = phy_start_aneg(mdp->phydev);
1163 spin_unlock_irqrestore(&mdp->lock, flags);
1164
1165 return ret;
1166}
1167
1168static u32 sh_eth_get_msglevel(struct net_device *ndev)
1169{
1170 struct sh_eth_private *mdp = netdev_priv(ndev);
1171 return mdp->msg_enable;
1172}
1173
1174static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1175{
1176 struct sh_eth_private *mdp = netdev_priv(ndev);
1177 mdp->msg_enable = value;
1178}
1179
1180static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1181 "rx_current", "tx_current",
1182 "rx_dirty", "tx_dirty",
1183};
1184#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1185
1186static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1187{
1188 switch (sset) {
1189 case ETH_SS_STATS:
1190 return SH_ETH_STATS_LEN;
1191 default:
1192 return -EOPNOTSUPP;
1193 }
1194}
1195
1196static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1197 struct ethtool_stats *stats, u64 *data)
1198{
1199 struct sh_eth_private *mdp = netdev_priv(ndev);
1200 int i = 0;
1201
1202 /* device-specific stats */
1203 data[i++] = mdp->cur_rx;
1204 data[i++] = mdp->cur_tx;
1205 data[i++] = mdp->dirty_rx;
1206 data[i++] = mdp->dirty_tx;
1207}
1208
1209static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1210{
1211 switch (stringset) {
1212 case ETH_SS_STATS:
1213 memcpy(data, *sh_eth_gstrings_stats,
1214 sizeof(sh_eth_gstrings_stats));
1215 break;
1216 }
1217}
1218
1219static struct ethtool_ops sh_eth_ethtool_ops = {
1220 .get_settings = sh_eth_get_settings,
1221 .set_settings = sh_eth_set_settings,
1222 .nway_reset = sh_eth_nway_reset,
1223 .get_msglevel = sh_eth_get_msglevel,
1224 .set_msglevel = sh_eth_set_msglevel,
1225 .get_link = ethtool_op_get_link,
1226 .get_strings = sh_eth_get_strings,
1227 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1228 .get_sset_count = sh_eth_get_sset_count,
1229};
1230
1066/* network device open function */ 1231/* network device open function */
1067static int sh_eth_open(struct net_device *ndev) 1232static int sh_eth_open(struct net_device *ndev)
1068{ 1233{
@@ -1073,8 +1238,8 @@ static int sh_eth_open(struct net_device *ndev)
1073 1238
1074 ret = request_irq(ndev->irq, sh_eth_interrupt, 1239 ret = request_irq(ndev->irq, sh_eth_interrupt,
1075#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1240#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1076 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1241 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1077 defined(CONFIG_CPU_SUBTYPE_SH7757) 1242 defined(CONFIG_CPU_SUBTYPE_SH7757)
1078 IRQF_SHARED, 1243 IRQF_SHARED,
1079#else 1244#else
1080 0, 1245 0,
@@ -1123,8 +1288,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1123 1288
1124 netif_stop_queue(ndev); 1289 netif_stop_queue(ndev);
1125 1290
1126 /* worning message out. */ 1291 if (netif_msg_timer(mdp))
1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1292 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR)); 1293 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
1129 1294
1130 /* tx_errors count up */ 1295 /* tx_errors count up */
@@ -1167,6 +1332,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1167 spin_lock_irqsave(&mdp->lock, flags); 1332 spin_lock_irqsave(&mdp->lock, flags);
1168 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1333 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1169 if (!sh_eth_txfree(ndev)) { 1334 if (!sh_eth_txfree(ndev)) {
1335 if (netif_msg_tx_queued(mdp))
1336 dev_warn(&ndev->dev, "TxFD exhausted.\n");
1170 netif_stop_queue(ndev); 1337 netif_stop_queue(ndev);
1171 spin_unlock_irqrestore(&mdp->lock, flags); 1338 spin_unlock_irqrestore(&mdp->lock, flags);
1172 return NETDEV_TX_BUSY; 1339 return NETDEV_TX_BUSY;
@@ -1497,8 +1664,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1497 1664
1498 /* set function */ 1665 /* set function */
1499 ndev->netdev_ops = &sh_eth_netdev_ops; 1666 ndev->netdev_ops = &sh_eth_netdev_ops;
1667 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
1500 ndev->watchdog_timeo = TX_TIMEOUT; 1668 ndev->watchdog_timeo = TX_TIMEOUT;
1501 1669
1670 /* debug message level */
1671 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
1502 mdp->post_rx = POST_RX >> (devno << 1); 1672 mdp->post_rx = POST_RX >> (devno << 1);
1503 mdp->post_fw = POST_FW >> (devno << 1); 1673 mdp->post_fw = POST_FW >> (devno << 1);
1504 1674
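
The sh_eth.c changes above route the driver's error and timeout messages through a per-device msg_enable bitmask (defaulting to SH_ETH_DEF_MSG_ENABLE) and the netif_msg_*() helpers, and expose it via the new get_msglevel/set_msglevel ethtool operations. A small self-contained sketch of that gating pattern follows; the bit values are invented stand-ins for the NETIF_MSG_* constants from <linux/netdevice.h>:

#include <stdio.h>

enum {
	MSG_LINK   = 1 << 0,	/* stand-in for NETIF_MSG_LINK */
	MSG_TIMER  = 1 << 1,	/* stand-in for NETIF_MSG_TIMER */
	MSG_RX_ERR = 1 << 2,	/* stand-in for NETIF_MSG_RX_ERR */
	MSG_TX_ERR = 1 << 3,	/* stand-in for NETIF_MSG_TX_ERR */
};

struct example_priv {
	unsigned int msg_enable;	/* adjustable at run time via ethtool msglvl */
};

static int example_msg_tx_err(const struct example_priv *p)
{
	return p->msg_enable & MSG_TX_ERR;
}

int main(void)
{
	struct example_priv priv = {
		/* default mask: link, timer and both error classes */
		.msg_enable = MSG_LINK | MSG_TIMER | MSG_RX_ERR | MSG_TX_ERR,
	};

	if (example_msg_tx_err(&priv))
		fprintf(stderr, "Transmit Abort\n");

	priv.msg_enable &= ~MSG_TX_ERR;		/* user lowered the level */
	if (!example_msg_tx_err(&priv))
		fprintf(stderr, "TX error messages now suppressed\n");
	return 0;
}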
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 640e368ebee..84d4167eee9 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
495 sis_priv->mii_info.reg_num_mask = 0x1f; 495 sis_priv->mii_info.reg_num_mask = 0x1f;
496 496
497 /* Get Mac address according to the chip revision */ 497 /* Get Mac address according to the chip revision */
498 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev)); 498 sis_priv->chipset_rev = pci_dev->revision;
499 if(netif_msg_probe(sis_priv)) 499 if(netif_msg_probe(sis_priv))
500 printk(KERN_DEBUG "%s: detected revision %2.2x, " 500 printk(KERN_DEBUG "%s: detected revision %2.2x, "
501 "trying to get MAC address...\n", 501 "trying to get MAC address...\n",
@@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
532 /* save our host bridge revision */ 532 /* save our host bridge revision */
533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL); 533 dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
534 if (dev) { 534 if (dev) {
535 pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev); 535 sis_priv->host_bridge_rev = dev->revision;
536 pci_dev_put(dev); 536 pci_dev_put(dev);
537 } 537 }
538 538
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df611ee1..43654a3bb0e 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
81#include <linux/ethtool.h> 81#include <linux/ethtool.h>
82#include <linux/mii.h> 82#include <linux/mii.h>
83#include <linux/workqueue.h> 83#include <linux/workqueue.h>
84#include <linux/of.h>
84 85
85#include <linux/netdevice.h> 86#include <linux/netdevice.h>
86#include <linux/etherdevice.h> 87#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
2394 return 0; 2395 return 0;
2395} 2396}
2396 2397
2398#ifdef CONFIG_OF
2399static const struct of_device_id smc91x_match[] = {
2400 { .compatible = "smsc,lan91c94", },
2401 { .compatible = "smsc,lan91c111", },
2402 {},
2403};
2404MODULE_DEVICE_TABLE(of, smc91x_match);
2405#endif
2406
2397static struct dev_pm_ops smc_drv_pm_ops = { 2407static struct dev_pm_ops smc_drv_pm_ops = {
2398 .suspend = smc_drv_suspend, 2408 .suspend = smc_drv_suspend,
2399 .resume = smc_drv_resume, 2409 .resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
2406 .name = CARDNAME, 2416 .name = CARDNAME,
2407 .owner = THIS_MODULE, 2417 .owner = THIS_MODULE,
2408 .pm = &smc_drv_pm_ops, 2418 .pm = &smc_drv_pm_ops,
2419#ifdef CONFIG_OF
2420 .of_match_table = smc91x_match,
2421#endif
2409 }, 2422 },
2410}; 2423};
2411 2424
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f8393..c1a344829b5 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
320 320
321 if (txmac_stat & MAC_TXSTAT_URUN) { 321 if (txmac_stat & MAC_TXSTAT_URUN) {
322 netdev_err(dev, "TX MAC xmit underrun\n"); 322 netdev_err(dev, "TX MAC xmit underrun\n");
323 gp->net_stats.tx_fifo_errors++; 323 dev->stats.tx_fifo_errors++;
324 } 324 }
325 325
326 if (txmac_stat & MAC_TXSTAT_MPE) { 326 if (txmac_stat & MAC_TXSTAT_MPE) {
327 netdev_err(dev, "TX MAC max packet size error\n"); 327 netdev_err(dev, "TX MAC max packet size error\n");
328 gp->net_stats.tx_errors++; 328 dev->stats.tx_errors++;
329 } 329 }
330 330
331 /* The rest are all cases of one of the 16-bit TX 331 /* The rest are all cases of one of the 16-bit TX
332 * counters expiring. 332 * counters expiring.
333 */ 333 */
334 if (txmac_stat & MAC_TXSTAT_NCE) 334 if (txmac_stat & MAC_TXSTAT_NCE)
335 gp->net_stats.collisions += 0x10000; 335 dev->stats.collisions += 0x10000;
336 336
337 if (txmac_stat & MAC_TXSTAT_ECE) { 337 if (txmac_stat & MAC_TXSTAT_ECE) {
338 gp->net_stats.tx_aborted_errors += 0x10000; 338 dev->stats.tx_aborted_errors += 0x10000;
339 gp->net_stats.collisions += 0x10000; 339 dev->stats.collisions += 0x10000;
340 } 340 }
341 341
342 if (txmac_stat & MAC_TXSTAT_LCE) { 342 if (txmac_stat & MAC_TXSTAT_LCE) {
343 gp->net_stats.tx_aborted_errors += 0x10000; 343 dev->stats.tx_aborted_errors += 0x10000;
344 gp->net_stats.collisions += 0x10000; 344 dev->stats.collisions += 0x10000;
345 } 345 }
346 346
347 /* We do not keep track of MAC_TXSTAT_FCE and 347 /* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
469 u32 smac = readl(gp->regs + MAC_SMACHINE); 469 u32 smac = readl(gp->regs + MAC_SMACHINE);
470 470
471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); 471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
472 gp->net_stats.rx_over_errors++; 472 dev->stats.rx_over_errors++;
473 gp->net_stats.rx_fifo_errors++; 473 dev->stats.rx_fifo_errors++;
474 474
475 ret = gem_rxmac_reset(gp); 475 ret = gem_rxmac_reset(gp);
476 } 476 }
477 477
478 if (rxmac_stat & MAC_RXSTAT_ACE) 478 if (rxmac_stat & MAC_RXSTAT_ACE)
479 gp->net_stats.rx_frame_errors += 0x10000; 479 dev->stats.rx_frame_errors += 0x10000;
480 480
481 if (rxmac_stat & MAC_RXSTAT_CCE) 481 if (rxmac_stat & MAC_RXSTAT_CCE)
482 gp->net_stats.rx_crc_errors += 0x10000; 482 dev->stats.rx_crc_errors += 0x10000;
483 483
484 if (rxmac_stat & MAC_RXSTAT_LCE) 484 if (rxmac_stat & MAC_RXSTAT_LCE)
485 gp->net_stats.rx_length_errors += 0x10000; 485 dev->stats.rx_length_errors += 0x10000;
486 486
487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE 487 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
488 * events. 488 * events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
594 if (netif_msg_rx_err(gp)) 594 if (netif_msg_rx_err(gp))
595 printk(KERN_DEBUG "%s: no buffer for rx frame\n", 595 printk(KERN_DEBUG "%s: no buffer for rx frame\n",
596 gp->dev->name); 596 gp->dev->name);
597 gp->net_stats.rx_dropped++; 597 dev->stats.rx_dropped++;
598 } 598 }
599 599
600 if (gem_status & GREG_STAT_RXTAGERR) { 600 if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
602 if (netif_msg_rx_err(gp)) 602 if (netif_msg_rx_err(gp))
603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n", 603 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
604 gp->dev->name); 604 gp->dev->name);
605 gp->net_stats.rx_errors++; 605 dev->stats.rx_errors++;
606 606
607 goto do_reset; 607 goto do_reset;
608 } 608 }
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
684 break; 684 break;
685 } 685 }
686 gp->tx_skbs[entry] = NULL; 686 gp->tx_skbs[entry] = NULL;
687 gp->net_stats.tx_bytes += skb->len; 687 dev->stats.tx_bytes += skb->len;
688 688
689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { 689 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
690 txd = &gp->init_block->txd[entry]; 690 txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
696 entry = NEXT_TX(entry); 696 entry = NEXT_TX(entry);
697 } 697 }
698 698
699 gp->net_stats.tx_packets++; 699 dev->stats.tx_packets++;
700 dev_kfree_skb_irq(skb); 700 dev_kfree_skb_irq(skb);
701 } 701 }
702 gp->tx_old = entry; 702 gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
738 738
739static int gem_rx(struct gem *gp, int work_to_do) 739static int gem_rx(struct gem *gp, int work_to_do)
740{ 740{
741 struct net_device *dev = gp->dev;
741 int entry, drops, work_done = 0; 742 int entry, drops, work_done = 0;
742 u32 done; 743 u32 done;
743 __sum16 csum; 744 __sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
782 783
783 len = (status & RXDCTRL_BUFSZ) >> 16; 784 len = (status & RXDCTRL_BUFSZ) >> 16;
784 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { 785 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
785 gp->net_stats.rx_errors++; 786 dev->stats.rx_errors++;
786 if (len < ETH_ZLEN) 787 if (len < ETH_ZLEN)
787 gp->net_stats.rx_length_errors++; 788 dev->stats.rx_length_errors++;
788 if (len & RXDCTRL_BAD) 789 if (len & RXDCTRL_BAD)
789 gp->net_stats.rx_crc_errors++; 790 dev->stats.rx_crc_errors++;
790 791
791 /* We'll just return it to GEM. */ 792 /* We'll just return it to GEM. */
792 drop_it: 793 drop_it:
793 gp->net_stats.rx_dropped++; 794 dev->stats.rx_dropped++;
794 goto next; 795 goto next;
795 } 796 }
796 797
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
843 844
844 netif_receive_skb(skb); 845 netif_receive_skb(skb);
845 846
846 gp->net_stats.rx_packets++; 847 dev->stats.rx_packets++;
847 gp->net_stats.rx_bytes += len; 848 dev->stats.rx_bytes += len;
848 849
849 next: 850 next:
850 entry = NEXT_RX(entry); 851 entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
2472static struct net_device_stats *gem_get_stats(struct net_device *dev) 2473static struct net_device_stats *gem_get_stats(struct net_device *dev)
2473{ 2474{
2474 struct gem *gp = netdev_priv(dev); 2475 struct gem *gp = netdev_priv(dev);
2475 struct net_device_stats *stats = &gp->net_stats;
2476 2476
2477 spin_lock_irq(&gp->lock); 2477 spin_lock_irq(&gp->lock);
2478 spin_lock(&gp->tx_lock); 2478 spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2481 * so we shield against this 2481 * so we shield against this
2482 */ 2482 */
2483 if (gp->running) { 2483 if (gp->running) {
2484 stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2484 dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2485 writel(0, gp->regs + MAC_FCSERR); 2485 writel(0, gp->regs + MAC_FCSERR);
2486 2486
2487 stats->rx_frame_errors += readl(gp->regs + MAC_AERR); 2487 dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
2488 writel(0, gp->regs + MAC_AERR); 2488 writel(0, gp->regs + MAC_AERR);
2489 2489
2490 stats->rx_length_errors += readl(gp->regs + MAC_LERR); 2490 dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
2491 writel(0, gp->regs + MAC_LERR); 2491 writel(0, gp->regs + MAC_LERR);
2492 2492
2493 stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2493 dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2494 stats->collisions += 2494 dev->stats.collisions +=
2495 (readl(gp->regs + MAC_ECOLL) + 2495 (readl(gp->regs + MAC_ECOLL) +
2496 readl(gp->regs + MAC_LCOLL)); 2496 readl(gp->regs + MAC_LCOLL));
2497 writel(0, gp->regs + MAC_ECOLL); 2497 writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2501 spin_unlock(&gp->tx_lock); 2501 spin_unlock(&gp->tx_lock);
2502 spin_unlock_irq(&gp->lock); 2502 spin_unlock_irq(&gp->lock);
2503 2503
2504 return &gp->net_stats; 2504 return &dev->stats;
2505} 2505}
2506 2506
2507static int gem_set_mac_address(struct net_device *dev, void *addr) 2507static int gem_set_mac_address(struct net_device *dev, void *addr)
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 19905460def..ede01787236 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -994,7 +994,6 @@ struct gem {
994 u32 status; 994 u32 status;
995 995
996 struct napi_struct napi; 996 struct napi_struct napi;
997 struct net_device_stats net_stats;
998 997
999 int tx_fifo_sz; 998 int tx_fifo_sz;
1000 int rx_fifo_sz; 999 int rx_fifo_sz;
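
The sungem changes above drop the driver-private net_stats copy and account directly into the counters embedded in the net_device, so gem_get_stats() only has to fold in the hardware registers and return &dev->stats. A toy, self-contained version of that ownership change; the struct names are invented and only the shape matches the driver:

#include <stdio.h>

struct example_stats {
	unsigned long rx_packets;
	unsigned long rx_crc_errors;
};

struct example_netdev {
	struct example_stats stats;	/* owned by the core, like dev->stats */
};

struct example_gem {
	struct example_netdev *dev;	/* no private stats copy any more */
};

static struct example_stats *example_get_stats(struct example_netdev *dev)
{
	/* hardware counters would be folded into dev->stats here */
	return &dev->stats;
}

int main(void)
{
	struct example_netdev ndev = { { 0, 0 } };
	struct example_gem gp = { &ndev };

	gp.dev->stats.rx_packets++;	/* RX path bumps the shared counters */
	printf("rx_packets = %lu\n", example_get_stats(&ndev)->rx_packets);
	return 0;
}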
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 06c0e503365..6be418591df 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2010 Broadcom Corporation. 7 * Copyright (C) 2005-2011 Broadcom Corporation.
8 * 8 *
9 * Firmware is: 9 * Firmware is:
10 * Derived from proprietary unpublished source code, 10 * Derived from proprietary unpublished source code,
@@ -64,10 +64,10 @@
64 64
65#define DRV_MODULE_NAME "tg3" 65#define DRV_MODULE_NAME "tg3"
66#define TG3_MAJ_NUM 3 66#define TG3_MAJ_NUM 3
67#define TG3_MIN_NUM 116 67#define TG3_MIN_NUM 117
68#define DRV_MODULE_VERSION \ 68#define DRV_MODULE_VERSION \
69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 69 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
70#define DRV_MODULE_RELDATE "December 3, 2010" 70#define DRV_MODULE_RELDATE "January 25, 2011"
71 71
72#define TG3_DEF_MAC_MODE 0 72#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0 73#define TG3_DEF_RX_MODE 0
@@ -1776,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1776 tg3_phy_cl45_read(tp, MDIO_MMD_AN, 1776 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1777 TG3_CL45_D7_EEERES_STAT, &val); 1777 TG3_CL45_D7_EEERES_STAT, &val);
1778 1778
1779 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 1779 switch (val) {
1780 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) 1780 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1781 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1782 case ASIC_REV_5717:
1783 case ASIC_REV_5719:
1784 case ASIC_REV_57765:
1785 /* Enable SM_DSP clock and tx 6dB coding. */
1786 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1787 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1788 MII_TG3_AUXCTL_ACTL_TX_6DB;
1789 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1790
1791 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1792
1793 /* Turn off SM_DSP clock. */
1794 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1795 MII_TG3_AUXCTL_ACTL_TX_6DB;
1796 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1797 }
1798 /* Fallthrough */
1799 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1781 tp->setlpicnt = 2; 1800 tp->setlpicnt = 2;
1801 }
1782 } 1802 }
1783 1803
1784 if (!tp->setlpicnt) { 1804 if (!tp->setlpicnt) {
@@ -2968,11 +2988,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2968 MII_TG3_AUXCTL_ACTL_TX_6DB; 2988 MII_TG3_AUXCTL_ACTL_TX_6DB;
2969 tg3_writephy(tp, MII_TG3_AUX_CTRL, val); 2989 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2970 2990
2971 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 2991 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && 2992 case ASIC_REV_5717:
2973 !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) 2993 case ASIC_REV_57765:
2974 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, 2994 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2975 val | MII_TG3_DSP_CH34TP2_HIBW01); 2995 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2996 MII_TG3_DSP_CH34TP2_HIBW01);
2997 /* Fall through */
2998 case ASIC_REV_5719:
2999 val = MII_TG3_DSP_TAP26_ALNOKO |
3000 MII_TG3_DSP_TAP26_RMRXSTO |
3001 MII_TG3_DSP_TAP26_OPCSINPT;
3002 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3003 }
2976 3004
2977 val = 0; 3005 val = 0;
2978 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 3006 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -7801,7 +7829,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7801 TG3_CPMU_DBTMR1_LNKIDLE_2047US); 7829 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7802 7830
7803 tw32_f(TG3_CPMU_EEE_DBTMR2, 7831 tw32_f(TG3_CPMU_EEE_DBTMR2,
7804 TG3_CPMU_DBTMR1_APE_TX_2047US | 7832 TG3_CPMU_DBTMR2_APE_TX_2047US |
7805 TG3_CPMU_DBTMR2_TXIDXEQ_2047US); 7833 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7806 } 7834 }
7807 7835
@@ -8075,8 +8103,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8075 /* Program the jumbo buffer descriptor ring control 8103 /* Program the jumbo buffer descriptor ring control
8076 * blocks on those devices that have them. 8104 * blocks on those devices that have them.
8077 */ 8105 */
8078 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && 8106 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8079 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 8107 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8108 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8080 /* Setup replenish threshold. */ 8109 /* Setup replenish threshold. */
8081 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); 8110 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8082 8111
@@ -8194,8 +8223,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8194 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8223 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8195 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8224 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 8225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8197 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK; 8226 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8198 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B; 8227 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8228 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8229 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8230 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8231 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8199 } 8232 }
8200 tw32(TG3_RDMA_RSRVCTRL_REG, 8233 tw32(TG3_RDMA_RSRVCTRL_REG,
8201 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8234 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
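The 5719 watermark change above is a plain read-modify-write of multi-bit register fields: clear the whole field with its mask before OR-ing in the new encoding, so stale bits from the previous value cannot survive. A generic sketch of the idiom, with illustrative names:

    /* regs, SOME_REG, FIELD_MASK and FIELD_NEW are placeholder names. */
    u32 val = readl(regs + SOME_REG);
    val &= ~FIELD_MASK;        /* clear the entire field first  */
    val |= FIELD_NEW;          /* then OR in the new encoding   */
    writel(val, regs + SOME_REG);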
@@ -8317,7 +8350,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8317 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); 8350 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8318 udelay(100); 8351 udelay(100);
8319 8352
8320 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { 8353 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8354 tp->irq_cnt > 1) {
8321 val = tr32(MSGINT_MODE); 8355 val = tr32(MSGINT_MODE);
8322 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; 8356 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8323 tw32(MSGINT_MODE, val); 8357 tw32(MSGINT_MODE, val);
@@ -9057,7 +9091,8 @@ static void tg3_ints_init(struct tg3 *tp)
9057 9091
9058 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { 9092 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9059 u32 msi_mode = tr32(MSGINT_MODE); 9093 u32 msi_mode = tr32(MSGINT_MODE);
9060 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) 9094 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
9095 tp->irq_cnt > 1)
9061 msi_mode |= MSGINT_MODE_MULTIVEC_EN; 9096 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9062 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); 9097 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9063 } 9098 }
@@ -10833,13 +10868,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10833 if (loopback_mode == TG3_MAC_LOOPBACK) { 10868 if (loopback_mode == TG3_MAC_LOOPBACK) {
10834 /* HW errata - mac loopback fails in some cases on 5780. 10869 /* HW errata - mac loopback fails in some cases on 5780.
10835 * Normal traffic and PHY loopback are not affected by 10870 * Normal traffic and PHY loopback are not affected by
10836 * errata. 10871 * errata. Also, the MAC loopback test is deprecated for
10872 * all newer ASIC revisions.
10837 */ 10873 */
10838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 10874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10875 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
10839 return 0; 10876 return 0;
10840 10877
10841 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 10878 mac_mode = tp->mac_mode &
10842 MAC_MODE_PORT_INT_LPBACK; 10879 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10880 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10843 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 10881 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10844 mac_mode |= MAC_MODE_LINK_POLARITY; 10882 mac_mode |= MAC_MODE_LINK_POLARITY;
10845 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) 10883 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10861,7 +10899,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10861 tg3_writephy(tp, MII_BMCR, val); 10899 tg3_writephy(tp, MII_BMCR, val);
10862 udelay(40); 10900 udelay(40);
10863 10901
10864 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; 10902 mac_mode = tp->mac_mode &
10903 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10865 if (tp->phy_flags & TG3_PHYFLG_IS_FET) { 10904 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10866 tg3_writephy(tp, MII_TG3_FET_PTEST, 10905 tg3_writephy(tp, MII_TG3_FET_PTEST,
10867 MII_TG3_FET_PTEST_FRC_TX_LINK | 10906 MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10889,6 +10928,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10889 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 10928 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10890 } 10929 }
10891 tw32(MAC_MODE, mac_mode); 10930 tw32(MAC_MODE, mac_mode);
10931
10932 /* Wait for link */
10933 for (i = 0; i < 100; i++) {
10934 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
10935 break;
10936 mdelay(1);
10937 }
10892 } else { 10938 } else {
10893 return -EINVAL; 10939 return -EINVAL;
10894 } 10940 }
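The wait-for-link addition above is a bounded poll: re-read a status register a fixed number of times with a short delay instead of sleeping once and hoping the link is up. A generic sketch of the idiom, with made-up names for the status read and the ready bit:

    /* my_read_status() and MY_LINK_UP are illustrative placeholders. */
    int i;
    bool up = false;

    for (i = 0; i < 100; i++) {
            if (my_read_status() & MY_LINK_UP) {
                    up = true;
                    break;
            }
            mdelay(1);      /* busy-wait 1 ms; acceptable in slow test paths */
    }
    if (!up)
            pr_warn("link did not come up within 100 ms\n");

Note that the loop in the patch does not fail the test if the bit never appears; it simply stops waiting and lets the packet loop decide.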
@@ -10995,14 +11041,19 @@ out:
10995static int tg3_test_loopback(struct tg3 *tp) 11041static int tg3_test_loopback(struct tg3 *tp)
10996{ 11042{
10997 int err = 0; 11043 int err = 0;
10998 u32 cpmuctrl = 0; 11044 u32 eee_cap, cpmuctrl = 0;
10999 11045
11000 if (!netif_running(tp->dev)) 11046 if (!netif_running(tp->dev))
11001 return TG3_LOOPBACK_FAILED; 11047 return TG3_LOOPBACK_FAILED;
11002 11048
11049 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11050 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11051
11003 err = tg3_reset_hw(tp, 1); 11052 err = tg3_reset_hw(tp, 1);
11004 if (err) 11053 if (err) {
11005 return TG3_LOOPBACK_FAILED; 11054 err = TG3_LOOPBACK_FAILED;
11055 goto done;
11056 }
11006 11057
11007 /* Turn off gphy autopowerdown. */ 11058 /* Turn off gphy autopowerdown. */
11008 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11059 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11022,8 +11073,10 @@ static int tg3_test_loopback(struct tg3 *tp)
11022 udelay(10); 11073 udelay(10);
11023 } 11074 }
11024 11075
11025 if (status != CPMU_MUTEX_GNT_DRIVER) 11076 if (status != CPMU_MUTEX_GNT_DRIVER) {
11026 return TG3_LOOPBACK_FAILED; 11077 err = TG3_LOOPBACK_FAILED;
11078 goto done;
11079 }
11027 11080
11028 /* Turn off link-based power management. */ 11081 /* Turn off link-based power management. */
11029 cpmuctrl = tr32(TG3_CPMU_CTRL); 11082 cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11052,6 +11105,9 @@ static int tg3_test_loopback(struct tg3 *tp)
11052 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) 11105 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11053 tg3_phy_toggle_apd(tp, true); 11106 tg3_phy_toggle_apd(tp, true);
11054 11107
11108done:
11109 tp->phy_flags |= eee_cap;
11110
11055 return err; 11111 return err;
11056} 11112}
11057 11113
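The tg3_test_loopback change above combines two common idioms: temporarily clearing a capability flag for the duration of a test, and funnelling every early return through a single goto done label so the flag is always restored. A condensed sketch with hypothetical names:

    static int my_selftest(struct my_dev *tp)
    {
            u32 saved_cap;
            int err = 0;

            /* stash and clear the capability so the test runs without it */
            saved_cap = tp->flags & MY_FLAG_SOME_CAP;
            tp->flags &= ~MY_FLAG_SOME_CAP;

            if (my_reset_hw(tp)) {
                    err = -EIO;             /* never return before the restore */
                    goto done;
            }

            err = my_run_test(tp);

    done:
            tp->flags |= saved_cap;         /* restore exactly what was cleared */
            return err;
    }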
@@ -12407,9 +12463,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12407 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; 12463 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12408 } 12464 }
12409done: 12465done:
12410 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); 12466 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
12411 device_set_wakeup_enable(&tp->pdev->dev, 12467 device_set_wakeup_enable(&tp->pdev->dev,
12412 tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 12468 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12469 else
12470 device_set_wakeup_capable(&tp->pdev->dev, false);
12413} 12471}
12414 12472
12415static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 12473static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
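The done: hunk above replaces the single device_init_wakeup() call with the finer-grained wakeup API: when the part is WoL-capable only the enable state is refreshed via device_set_wakeup_enable(), and otherwise the wakeup capability is cleared outright with device_set_wakeup_capable(dev, false). A rough usage sketch (wol_capable and wol_enabled are illustrative locals):

    if (wol_capable)
            device_set_wakeup_enable(&pdev->dev, wol_enabled);
    else
            device_set_wakeup_capable(&pdev->dev, false);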
@@ -13262,7 +13320,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13262 } 13320 }
13263 13321
13264 /* Determine TSO capabilities */ 13322 /* Determine TSO capabilities */
13265 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13324 ; /* Do nothing. HW bug. */
13325 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13266 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; 13326 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13267 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 13327 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 13328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13313,7 +13373,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13313 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; 13373 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13314 } 13374 }
13315 13375
13316 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 13376 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
13377 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13317 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13378 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13318 13379
13319 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13380 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13331,42 +13392,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13331 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13392 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13332 13393
13333 tp->pcie_readrq = 4096; 13394 tp->pcie_readrq = 4096;
13334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { 13395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13335 u16 word; 13396 tp->pcie_readrq = 2048;
13336
13337 pci_read_config_word(tp->pdev,
13338 tp->pcie_cap + PCI_EXP_LNKSTA,
13339 &word);
13340 switch (word & PCI_EXP_LNKSTA_CLS) {
13341 case PCI_EXP_LNKSTA_CLS_2_5GB:
13342 word &= PCI_EXP_LNKSTA_NLW;
13343 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13344 switch (word) {
13345 case 2:
13346 tp->pcie_readrq = 2048;
13347 break;
13348 case 4:
13349 tp->pcie_readrq = 1024;
13350 break;
13351 }
13352 break;
13353
13354 case PCI_EXP_LNKSTA_CLS_5_0GB:
13355 word &= PCI_EXP_LNKSTA_NLW;
13356 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13357 switch (word) {
13358 case 1:
13359 tp->pcie_readrq = 2048;
13360 break;
13361 case 2:
13362 tp->pcie_readrq = 1024;
13363 break;
13364 case 4:
13365 tp->pcie_readrq = 512;
13366 break;
13367 }
13368 }
13369 }
13370 13397
13371 pcie_set_readrq(tp->pdev, tp->pcie_readrq); 13398 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13372 13399
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index f528243e1a4..73884b69b74 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) 5 * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2007-2010 Broadcom Corporation. 7 * Copyright (C) 2007-2011 Broadcom Corporation.
8 */ 8 */
9 9
10#ifndef _T3_H 10#ifndef _T3_H
@@ -141,6 +141,7 @@
141#define CHIPREV_ID_57780_A1 0x57780001 141#define CHIPREV_ID_57780_A1 0x57780001
142#define CHIPREV_ID_5717_A0 0x05717000 142#define CHIPREV_ID_5717_A0 0x05717000
143#define CHIPREV_ID_57765_A0 0x57785000 143#define CHIPREV_ID_57765_A0 0x57785000
144#define CHIPREV_ID_5719_A0 0x05719000
144#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 145#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
145#define ASIC_REV_5700 0x07 146#define ASIC_REV_5700 0x07
146#define ASIC_REV_5701 0x00 147#define ASIC_REV_5701 0x00
@@ -1105,7 +1106,7 @@
1105#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 1106#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1106#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff 1107#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
1107#define TG3_CPMU_EEE_DBTMR2 0x000036b8 1108#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1108#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000 1109#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
1109#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff 1110#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
1110#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc 1111#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1111#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 1112#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
@@ -1333,6 +1334,10 @@
1333 1334
1334#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1335#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1335#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1336#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1337#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
1338#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
1339#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
1340#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
1336#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 1341#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1337#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 1342#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1338/* 0x4904 --> 0x4910 unused */ 1343/* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
2108 2113
2109#define MII_TG3_DSP_TAP1 0x0001 2114#define MII_TG3_DSP_TAP1 0x0001
2110#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 2115#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
2116#define MII_TG3_DSP_TAP26 0x001a
2117#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
2118#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
2119#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
2111#define MII_TG3_DSP_AADJ1CH0 0x001f 2120#define MII_TG3_DSP_AADJ1CH0 0x001f
2112#define MII_TG3_DSP_CH34TP2 0x4022 2121#define MII_TG3_DSP_CH34TP2 0x4022
2113#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010 2122#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463cd8ec..ace6404e2fa 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -25,150 +25,9 @@
25 * Microchip Technology, 24C01A/02A/04A Data Sheet 25 * Microchip Technology, 24C01A/02A/04A Data Sheet
26 * available in PDF format from www.microchip.com 26 * available in PDF format from www.microchip.com
27 * 27 *
28 * Change History 28 ******************************************************************************/
29 * 29
30 * Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses 30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 * new PCI BIOS interface.
32 * Alan Cox <alan@lxorguk.ukuu.org.uk>:
33 * Fixed the out of memory
34 * handling.
35 *
36 * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer!
37 *
38 * v1.1 Dec 20, 1999 - Removed linux version checking
39 * Patch from Tigran Aivazian.
40 * - v1.1 includes Alan's SMP updates.
41 * - We still have problems on SMP though,
42 * but I'm looking into that.
43 *
44 * v1.2 Jan 02, 2000 - Hopefully fixed the SMP deadlock.
45 * - Removed dependency of HZ being 100.
46 * - We now allow higher priority timers to
47 * overwrite timers like TLAN_TIMER_ACTIVITY
48 * Patch from John Cagle <john.cagle@compaq.com>.
49 * - Fixed a few compiler warnings.
50 *
51 * v1.3 Feb 04, 2000 - Fixed the remaining HZ issues.
52 * - Removed call to pci_present().
53 * - Removed SA_INTERRUPT flag from irq handler.
54 * - Added __init and __initdata to reduce resisdent
55 * code size.
56 * - Driver now uses module_init/module_exit.
57 * - Rewrote init_module and tlan_probe to
58 * share a lot more code. We now use tlan_probe
59 * with builtin and module driver.
60 * - Driver ported to new net API.
61 * - tlan.txt has been reworked to reflect current
62 * driver (almost)
63 * - Other minor stuff
64 *
65 * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
66 * network cleanup in 2.3.43pre7 (Tigran & myself)
67 * - Minor stuff.
68 *
69 * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
70 * if no cable/link were present.
71 * - Cosmetic changes.
72 * - TODO: Port completely to new PCI/DMA API
73 * Auto-Neg fallback.
74 *
75 * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
76 * tested it though, as the kernel support is currently
77 * broken (2.3.99p4p3).
78 * - Updated tlan.txt accordingly.
79 * - Adjusted minimum/maximum frame length.
80 * - There is now a TLAN website up at
81 * http://hp.sourceforge.net/
82 *
83 * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
84 * reports PHY information when used with Donald
85 * Beckers userspace MII diagnostics utility.
86 *
87 * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
88 * - Added link information to Auto-Neg and forced
89 * modes. When NIC operates with auto-neg the driver
90 * will report Link speed & duplex modes as well as
91 * link partner abilities. When forced link is used,
92 * the driver will report status of the established
93 * link.
94 * Please read tlan.txt for additional information.
95 * - Removed call to check_region(), and used
96 * return value of request_region() instead.
97 *
98 * v1.8a May 28, 2000 - Minor updates.
99 *
100 * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
101 * - Updated with timer fixes from Andrew Morton.
102 * - Fixed module race in TLan_Open.
103 * - Added routine to monitor PHY status.
104 * - Added activity led support for Proliant devices.
105 *
106 * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
107 * like the Compaq NetFlex3/E.
108 * - Rewrote tlan_probe to better handle multiple
109 * bus probes. Probing and device setup is now
110 * done through TLan_Probe and TLan_init_one. Actual
111 * hardware probe is done with kernel API and
112 * TLan_EisaProbe.
113 * - Adjusted debug information for probing.
114 * - Fixed bug that would cause general debug information
115 * to be printed after driver removal.
116 * - Added transmit timeout handling.
117 * - Fixed OOM return values in tlan_probe.
118 * - Fixed possible mem leak in tlan_exit
119 * (now tlan_remove_one).
120 * - Fixed timer bug in TLan_phyMonitor.
121 * - This driver version is alpha quality, please
122 * send me any bug issues you may encounter.
123 *
124 * v1.11 Aug 31, 2000 - Do not try to register irq 0 if no irq line was
125 * set for EISA cards.
126 * - Added support for NetFlex3/E with nibble-rate
127 * 10Base-T PHY. This is untestet as I haven't got
128 * one of these cards.
129 * - Fixed timer being added twice.
130 * - Disabled PhyMonitoring by default as this is
131 * work in progress. Define MONITOR to enable it.
132 * - Now we don't display link info with PHYs that
133 * doesn't support it (level1).
134 * - Incresed tx_timeout beacuse of auto-neg.
135 * - Adjusted timers for forced speeds.
136 *
137 * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
138 *
139 * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
140 * when link can't be established.
141 * - Added the bbuf option as a kernel parameter.
142 * - Fixed ioaddr probe bug.
143 * - Fixed stupid deadlock with MII interrupts.
144 * - Added support for speed/duplex selection with
145 * multiple nics.
146 * - Added partly fix for TX Channel lockup with
147 * TLAN v1.0 silicon. This needs to be investigated
148 * further.
149 *
150 * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
151 * interrupt. Thanks goes to
152 * Adam Keys <adam@ti.com>
153 * Denis Beaudoin <dbeaudoin@ti.com>
154 * for providing the patch.
155 * - Fixed auto-neg output when using multiple
156 * adapters.
157 * - Converted to use new taskq interface.
158 *
159 * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
160 *
161 * Samuel Chessman <chessman@tux.org> New Maintainer!
162 *
163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
164 * 10T half duplex no loopback
165 * Thanks to Gunnar Eikman
166 *
167 * Sakari Ailus <sakari.ailus@iki.fi>:
168 *
169 * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
170 *
171 *******************************************************************************/
172 31
173#include <linux/module.h> 32#include <linux/module.h>
174#include <linux/init.h> 33#include <linux/init.h>
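Besides deleting the in-file changelog, the hunk above introduces the kernel's pr_fmt convention: define pr_fmt() before the first include so every subsequent pr_info()/pr_err() in the file is automatically prefixed with the module name, which is what lets the hand-written "TLAN: " prefixes disappear later in the patch. A minimal sketch of the idiom:

    /* must be defined before any include that pulls in printk.h */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/module.h>
    #include <linux/kernel.h>

    static int __init my_init(void)
    {
            pr_info("probing\n");   /* logs "<modname>: probing" */
            return 0;
    }
    module_init(my_init);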
@@ -185,13 +44,11 @@
185 44
186#include "tlan.h" 45#include "tlan.h"
187 46
188typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
189
190 47
191/* For removing EISA devices */ 48/* For removing EISA devices */
192static struct net_device *TLan_Eisa_Devices; 49static struct net_device *tlan_eisa_devices;
193 50
194static int TLanDevicesInstalled; 51static int tlan_devices_installed;
195 52
196/* Set speed, duplex and aui settings */ 53/* Set speed, duplex and aui settings */
197static int aui[MAX_TLAN_BOARDS]; 54static int aui[MAX_TLAN_BOARDS];
@@ -202,8 +59,9 @@ module_param_array(aui, int, NULL, 0);
202module_param_array(duplex, int, NULL, 0); 59module_param_array(duplex, int, NULL, 0);
203module_param_array(speed, int, NULL, 0); 60module_param_array(speed, int, NULL, 0);
204MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); 61MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
205MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); 62MODULE_PARM_DESC(duplex,
206MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); 63 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
64MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
207 65
208MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); 66MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
209MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); 67MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
@@ -218,139 +76,144 @@ static int debug;
218module_param(debug, int, 0); 76module_param(debug, int, 0);
219MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); 77MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
220 78
221static const char TLanSignature[] = "TLAN"; 79static const char tlan_signature[] = "TLAN";
222static const char tlan_banner[] = "ThunderLAN driver v1.15a\n"; 80static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
223static int tlan_have_pci; 81static int tlan_have_pci;
224static int tlan_have_eisa; 82static int tlan_have_eisa;
225 83
226static const char *media[] = { 84static const char * const media[] = {
227 "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ", 85 "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
228 "100baseTx-FD", "100baseT4", NULL 86 "100BaseTx-FD", "100BaseT4", NULL
229}; 87};
230 88
231static struct board { 89static struct board {
232 const char *deviceLabel; 90 const char *device_label;
233 u32 flags; 91 u32 flags;
234 u16 addrOfs; 92 u16 addr_ofs;
235} board_info[] = { 93} board_info[] = {
236 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 94 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
237 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 95 { "Compaq Netelligent 10/100 TX PCI UTP",
96 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
238 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 97 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
239 { "Compaq NetFlex-3/P", 98 { "Compaq NetFlex-3/P",
240 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 99 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 100 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
242 { "Compaq Netelligent Integrated 10/100 TX UTP", 101 { "Compaq Netelligent Integrated 10/100 TX UTP",
243 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 102 TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 103 { "Compaq Netelligent Dual 10/100 TX PCI UTP",
245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 104 TLAN_ADAPTER_NONE, 0x83 },
105 { "Compaq Netelligent 10/100 TX Embedded UTP",
106 TLAN_ADAPTER_NONE, 0x83 },
246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, 107 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
247 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 }, 108 { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
248 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 109 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
249 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 110 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
250 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 111 { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
251 { "Compaq NetFlex-3/E", 112 { "Compaq NetFlex-3/E",
252 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 113 TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
253 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 114 TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
254 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 115 { "Compaq NetFlex-3/E",
116 TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
255}; 117};
256 118
257static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { 119static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
258 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, 120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
259 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
260 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, 122 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
261 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
262 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, 124 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
263 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, 125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
264 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, 126 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
265 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, 127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
266 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, 128 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
267 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
268 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, 130 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
269 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, 131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
270 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, 132 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
271 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, 133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
272 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, 134 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
273 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, 135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
274 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, 136 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
275 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
276 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, 138 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
277 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, 139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
278 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, 140 { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, 141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
280 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, 142 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, 143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
282 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, 144 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
283 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, 145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
284 { 0,} 146 { 0,}
285}; 147};
286MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); 148MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
287 149
288static void TLan_EisaProbe( void ); 150static void tlan_eisa_probe(void);
289static void TLan_Eisa_Cleanup( void ); 151static void tlan_eisa_cleanup(void);
290static int TLan_Init( struct net_device * ); 152static int tlan_init(struct net_device *);
291static int TLan_Open( struct net_device *dev ); 153static int tlan_open(struct net_device *dev);
292static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *); 154static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
293static irqreturn_t TLan_HandleInterrupt( int, void *); 155static irqreturn_t tlan_handle_interrupt(int, void *);
294static int TLan_Close( struct net_device *); 156static int tlan_close(struct net_device *);
295static struct net_device_stats *TLan_GetStats( struct net_device *); 157static struct net_device_stats *tlan_get_stats(struct net_device *);
296static void TLan_SetMulticastList( struct net_device *); 158static void tlan_set_multicast_list(struct net_device *);
297static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 159static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
298static int TLan_probe1( struct pci_dev *pdev, long ioaddr, 160static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
299 int irq, int rev, const struct pci_device_id *ent); 161 int irq, int rev, const struct pci_device_id *ent);
300static void TLan_tx_timeout( struct net_device *dev); 162static void tlan_tx_timeout(struct net_device *dev);
301static void TLan_tx_timeout_work(struct work_struct *work); 163static void tlan_tx_timeout_work(struct work_struct *work);
302static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); 164static int tlan_init_one(struct pci_dev *pdev,
303 165 const struct pci_device_id *ent);
304static u32 TLan_HandleTxEOF( struct net_device *, u16 ); 166
305static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); 167static u32 tlan_handle_tx_eof(struct net_device *, u16);
306static u32 TLan_HandleRxEOF( struct net_device *, u16 ); 168static u32 tlan_handle_stat_overflow(struct net_device *, u16);
307static u32 TLan_HandleDummy( struct net_device *, u16 ); 169static u32 tlan_handle_rx_eof(struct net_device *, u16);
308static u32 TLan_HandleTxEOC( struct net_device *, u16 ); 170static u32 tlan_handle_dummy(struct net_device *, u16);
309static u32 TLan_HandleStatusCheck( struct net_device *, u16 ); 171static u32 tlan_handle_tx_eoc(struct net_device *, u16);
310static u32 TLan_HandleRxEOC( struct net_device *, u16 ); 172static u32 tlan_handle_status_check(struct net_device *, u16);
311 173static u32 tlan_handle_rx_eoc(struct net_device *, u16);
312static void TLan_Timer( unsigned long ); 174
313 175static void tlan_timer(unsigned long);
314static void TLan_ResetLists( struct net_device * ); 176
315static void TLan_FreeLists( struct net_device * ); 177static void tlan_reset_lists(struct net_device *);
316static void TLan_PrintDio( u16 ); 178static void tlan_free_lists(struct net_device *);
317static void TLan_PrintList( TLanList *, char *, int ); 179static void tlan_print_dio(u16);
318static void TLan_ReadAndClearStats( struct net_device *, int ); 180static void tlan_print_list(struct tlan_list *, char *, int);
319static void TLan_ResetAdapter( struct net_device * ); 181static void tlan_read_and_clear_stats(struct net_device *, int);
320static void TLan_FinishReset( struct net_device * ); 182static void tlan_reset_adapter(struct net_device *);
321static void TLan_SetMac( struct net_device *, int areg, char *mac ); 183static void tlan_finish_reset(struct net_device *);
322 184static void tlan_set_mac(struct net_device *, int areg, char *mac);
323static void TLan_PhyPrint( struct net_device * ); 185
324static void TLan_PhyDetect( struct net_device * ); 186static void tlan_phy_print(struct net_device *);
325static void TLan_PhyPowerDown( struct net_device * ); 187static void tlan_phy_detect(struct net_device *);
326static void TLan_PhyPowerUp( struct net_device * ); 188static void tlan_phy_power_down(struct net_device *);
327static void TLan_PhyReset( struct net_device * ); 189static void tlan_phy_power_up(struct net_device *);
328static void TLan_PhyStartLink( struct net_device * ); 190static void tlan_phy_reset(struct net_device *);
329static void TLan_PhyFinishAutoNeg( struct net_device * ); 191static void tlan_phy_start_link(struct net_device *);
192static void tlan_phy_finish_auto_neg(struct net_device *);
330#ifdef MONITOR 193#ifdef MONITOR
331static void TLan_PhyMonitor( struct net_device * ); 194static void tlan_phy_monitor(struct net_device *);
332#endif 195#endif
333 196
334/* 197/*
335static int TLan_PhyNop( struct net_device * ); 198 static int tlan_phy_nop(struct net_device *);
336static int TLan_PhyInternalCheck( struct net_device * ); 199 static int tlan_phy_internal_check(struct net_device *);
337static int TLan_PhyInternalService( struct net_device * ); 200 static int tlan_phy_internal_service(struct net_device *);
338static int TLan_PhyDp83840aCheck( struct net_device * ); 201 static int tlan_phy_dp83840a_check(struct net_device *);
339*/ 202*/
340 203
341static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); 204static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
342static void TLan_MiiSendData( u16, u32, unsigned ); 205static void tlan_mii_send_data(u16, u32, unsigned);
343static void TLan_MiiSync( u16 ); 206static void tlan_mii_sync(u16);
344static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); 207static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
345 208
346static void TLan_EeSendStart( u16 ); 209static void tlan_ee_send_start(u16);
347static int TLan_EeSendByte( u16, u8, int ); 210static int tlan_ee_send_byte(u16, u8, int);
348static void TLan_EeReceiveByte( u16, u8 *, int ); 211static void tlan_ee_receive_byte(u16, u8 *, int);
349static int TLan_EeReadByte( struct net_device *, u8, u8 * ); 212static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
350 213
351 214
352static inline void 215static inline void
353TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) 216tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
354{ 217{
355 unsigned long addr = (unsigned long)skb; 218 unsigned long addr = (unsigned long)skb;
356 tag->buffer[9].address = addr; 219 tag->buffer[9].address = addr;
@@ -358,7 +221,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
358} 221}
359 222
360static inline struct sk_buff * 223static inline struct sk_buff *
361TLan_GetSKB( const struct tlan_list_tag *tag) 224tlan_get_skb(const struct tlan_list *tag)
362{ 225{
363 unsigned long addr; 226 unsigned long addr;
364 227
@@ -367,50 +230,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
367 return (struct sk_buff *) addr; 230 return (struct sk_buff *) addr;
368} 231}
369 232
370 233static u32
371static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { 234(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
372 NULL, 235 NULL,
373 TLan_HandleTxEOF, 236 tlan_handle_tx_eof,
374 TLan_HandleStatOverflow, 237 tlan_handle_stat_overflow,
375 TLan_HandleRxEOF, 238 tlan_handle_rx_eof,
376 TLan_HandleDummy, 239 tlan_handle_dummy,
377 TLan_HandleTxEOC, 240 tlan_handle_tx_eoc,
378 TLan_HandleStatusCheck, 241 tlan_handle_status_check,
379 TLan_HandleRxEOC 242 tlan_handle_rx_eoc
380}; 243};
381 244
382static inline void 245static inline void
383TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) 246tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
384{ 247{
385 TLanPrivateInfo *priv = netdev_priv(dev); 248 struct tlan_priv *priv = netdev_priv(dev);
386 unsigned long flags = 0; 249 unsigned long flags = 0;
387 250
388 if (!in_irq()) 251 if (!in_irq())
389 spin_lock_irqsave(&priv->lock, flags); 252 spin_lock_irqsave(&priv->lock, flags);
390 if ( priv->timer.function != NULL && 253 if (priv->timer.function != NULL &&
391 priv->timerType != TLAN_TIMER_ACTIVITY ) { 254 priv->timer_type != TLAN_TIMER_ACTIVITY) {
392 if (!in_irq()) 255 if (!in_irq())
393 spin_unlock_irqrestore(&priv->lock, flags); 256 spin_unlock_irqrestore(&priv->lock, flags);
394 return; 257 return;
395 } 258 }
396 priv->timer.function = TLan_Timer; 259 priv->timer.function = tlan_timer;
397 if (!in_irq()) 260 if (!in_irq())
398 spin_unlock_irqrestore(&priv->lock, flags); 261 spin_unlock_irqrestore(&priv->lock, flags);
399 262
400 priv->timer.data = (unsigned long) dev; 263 priv->timer.data = (unsigned long) dev;
401 priv->timerSetAt = jiffies; 264 priv->timer_set_at = jiffies;
402 priv->timerType = type; 265 priv->timer_type = type;
403 mod_timer(&priv->timer, jiffies + ticks); 266 mod_timer(&priv->timer, jiffies + ticks);
404 267
405} /* TLan_SetTimer */ 268}
406 269
407 270
408/***************************************************************************** 271/*****************************************************************************
409****************************************************************************** 272******************************************************************************
410 273
411 ThunderLAN Driver Primary Functions 274ThunderLAN driver primary functions
412 275
413 These functions are more or less common to all Linux network drivers. 276these functions are more or less common to all linux network drivers.
414 277
415****************************************************************************** 278******************************************************************************
416*****************************************************************************/ 279*****************************************************************************/
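The tlan.c hunks above also drop the TLanIntVectorFunc typedef and declare the interrupt dispatch table as an array of function pointers directly. The two spellings are equivalent C; only the declaration syntax changes. For comparison (names shortened for illustration):

    /* old style: typedef the handler type, then declare the table */
    typedef u32 (handler_fn)(struct net_device *, u16);
    static handler_fn *handlers_old[8];

    /* new style: array of function pointers, no typedef needed */
    static u32 (*handlers_new[8])(struct net_device *, u16);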
@@ -419,56 +282,124 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
419 282
420 283
421 284
422 /*************************************************************** 285/***************************************************************
423 * tlan_remove_one 286 * tlan_remove_one
424 * 287 *
425 * Returns: 288 * Returns:
426 * Nothing 289 * Nothing
427 * Parms: 290 * Parms:
428 * None 291 * None
429 * 292 *
430 * Goes through the TLanDevices list and frees the device 293 * Goes through the TLanDevices list and frees the device
431 * structs and memory associated with each device (lists 294 * structs and memory associated with each device (lists
432 * and buffers). It also unreserves the IO port regions 295 * and buffers). It also unreserves the IO port regions
433 * associated with this device. 296 * associated with this device.
434 * 297 *
435 **************************************************************/ 298 **************************************************************/
436 299
437 300
438static void __devexit tlan_remove_one( struct pci_dev *pdev) 301static void __devexit tlan_remove_one(struct pci_dev *pdev)
439{ 302{
440 struct net_device *dev = pci_get_drvdata( pdev ); 303 struct net_device *dev = pci_get_drvdata(pdev);
441 TLanPrivateInfo *priv = netdev_priv(dev); 304 struct tlan_priv *priv = netdev_priv(dev);
442 305
443 unregister_netdev( dev ); 306 unregister_netdev(dev);
444 307
445 if ( priv->dmaStorage ) { 308 if (priv->dma_storage) {
446 pci_free_consistent(priv->pciDev, 309 pci_free_consistent(priv->pci_dev,
447 priv->dmaSize, priv->dmaStorage, 310 priv->dma_size, priv->dma_storage,
448 priv->dmaStorageDMA ); 311 priv->dma_storage_dma);
449 } 312 }
450 313
451#ifdef CONFIG_PCI 314#ifdef CONFIG_PCI
452 pci_release_regions(pdev); 315 pci_release_regions(pdev);
453#endif 316#endif
454 317
455 free_netdev( dev ); 318 free_netdev(dev);
456 319
457 pci_set_drvdata( pdev, NULL ); 320 pci_set_drvdata(pdev, NULL);
458} 321}
459 322
323static void tlan_start(struct net_device *dev)
324{
325 tlan_reset_lists(dev);
326 /* NOTE: It might not be necessary to read the stats before a
327 reset if you don't care what the values are.
328 */
329 tlan_read_and_clear_stats(dev, TLAN_IGNORE);
330 tlan_reset_adapter(dev);
331 netif_wake_queue(dev);
332}
333
334static void tlan_stop(struct net_device *dev)
335{
336 struct tlan_priv *priv = netdev_priv(dev);
337
338 tlan_read_and_clear_stats(dev, TLAN_RECORD);
339 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
340 /* Reset and power down phy */
341 tlan_reset_adapter(dev);
342 if (priv->timer.function != NULL) {
343 del_timer_sync(&priv->timer);
344 priv->timer.function = NULL;
345 }
346}
347
348#ifdef CONFIG_PM
349
350static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
351{
352 struct net_device *dev = pci_get_drvdata(pdev);
353
354 if (netif_running(dev))
355 tlan_stop(dev);
356
357 netif_device_detach(dev);
358 pci_save_state(pdev);
359 pci_disable_device(pdev);
360 pci_wake_from_d3(pdev, false);
361 pci_set_power_state(pdev, PCI_D3hot);
362
363 return 0;
364}
365
366static int tlan_resume(struct pci_dev *pdev)
367{
368 struct net_device *dev = pci_get_drvdata(pdev);
369
370 pci_set_power_state(pdev, PCI_D0);
371 pci_restore_state(pdev);
372 pci_enable_wake(pdev, 0, 0);
373 netif_device_attach(dev);
374
375 if (netif_running(dev))
376 tlan_start(dev);
377
378 return 0;
379}
380
381#else /* CONFIG_PM */
382
383#define tlan_suspend NULL
384#define tlan_resume NULL
385
386#endif /* CONFIG_PM */
387
388
460static struct pci_driver tlan_driver = { 389static struct pci_driver tlan_driver = {
461 .name = "tlan", 390 .name = "tlan",
462 .id_table = tlan_pci_tbl, 391 .id_table = tlan_pci_tbl,
463 .probe = tlan_init_one, 392 .probe = tlan_init_one,
464 .remove = __devexit_p(tlan_remove_one), 393 .remove = __devexit_p(tlan_remove_one),
394 .suspend = tlan_suspend,
395 .resume = tlan_resume,
465}; 396};
466 397
467static int __init tlan_probe(void) 398static int __init tlan_probe(void)
468{ 399{
469 int rc = -ENODEV; 400 int rc = -ENODEV;
470 401
471 printk(KERN_INFO "%s", tlan_banner); 402 pr_info("%s", tlan_banner);
472 403
473 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); 404 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
474 405
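The suspend/resume support added above follows the legacy PCI power-management convention: the callbacks only exist under CONFIG_PM and are stubbed out as NULL otherwise, so struct pci_driver needs no #ifdef around its .suspend/.resume members. A stripped-down sketch of that wiring (the my_* names are illustrative):

    #ifdef CONFIG_PM
    static int my_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            pci_save_state(pdev);
            pci_set_power_state(pdev, PCI_D3hot);
            return 0;
    }

    static int my_resume(struct pci_dev *pdev)
    {
            pci_set_power_state(pdev, PCI_D0);
            pci_restore_state(pdev);
            return 0;
    }
    #else
    #define my_suspend NULL
    #define my_resume  NULL
    #endif

    static struct pci_driver my_driver = {
            .name    = "mydrv",
            .suspend = my_suspend,  /* NULL when !CONFIG_PM */
            .resume  = my_resume,
    };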
@@ -477,18 +408,18 @@ static int __init tlan_probe(void)
477 rc = pci_register_driver(&tlan_driver); 408 rc = pci_register_driver(&tlan_driver);
478 409
479 if (rc != 0) { 410 if (rc != 0) {
480 printk(KERN_ERR "TLAN: Could not register pci driver.\n"); 411 pr_err("Could not register pci driver\n");
481 goto err_out_pci_free; 412 goto err_out_pci_free;
482 } 413 }
483 414
484 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); 415 TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
485 TLan_EisaProbe(); 416 tlan_eisa_probe();
486 417
487 printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n", 418 pr_info("%d device%s installed, PCI: %d EISA: %d\n",
488 TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s", 419 tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
489 tlan_have_pci, tlan_have_eisa); 420 tlan_have_pci, tlan_have_eisa);
490 421
491 if (TLanDevicesInstalled == 0) { 422 if (tlan_devices_installed == 0) {
492 rc = -ENODEV; 423 rc = -ENODEV;
493 goto err_out_pci_unreg; 424 goto err_out_pci_unreg;
494 } 425 }
@@ -501,39 +432,39 @@ err_out_pci_free:
501} 432}
502 433
503 434
504static int __devinit tlan_init_one( struct pci_dev *pdev, 435static int __devinit tlan_init_one(struct pci_dev *pdev,
505 const struct pci_device_id *ent) 436 const struct pci_device_id *ent)
506{ 437{
507 return TLan_probe1( pdev, -1, -1, 0, ent); 438 return tlan_probe1(pdev, -1, -1, 0, ent);
508} 439}
509 440
510 441
511/* 442/*
512 *************************************************************** 443***************************************************************
513 * tlan_probe1 444* tlan_probe1
514 * 445*
515 * Returns: 446* Returns:
516 * 0 on success, error code on error 447* 0 on success, error code on error
517 * Parms: 448* Parms:
518 * none 449* none
519 * 450*
520 * The name is lower case to fit in with all the rest of 451* The name is lower case to fit in with all the rest of
521 * the netcard_probe names. This function looks for 452* the netcard_probe names. This function looks for
522 * another TLan based adapter, setting it up with the 453* another TLan based adapter, setting it up with the
523 * allocated device struct if one is found. 454* allocated device struct if one is found.
524 * tlan_probe has been ported to the new net API and 455* tlan_probe has been ported to the new net API and
525 * now allocates its own device structure. This function 456* now allocates its own device structure. This function
526 * is also used by modules. 457* is also used by modules.
527 * 458*
528 **************************************************************/ 459**************************************************************/
529 460
530static int __devinit TLan_probe1(struct pci_dev *pdev, 461static int __devinit tlan_probe1(struct pci_dev *pdev,
531 long ioaddr, int irq, int rev, 462 long ioaddr, int irq, int rev,
532 const struct pci_device_id *ent ) 463 const struct pci_device_id *ent)
533{ 464{
534 465
535 struct net_device *dev; 466 struct net_device *dev;
536 TLanPrivateInfo *priv; 467 struct tlan_priv *priv;
537 u16 device_id; 468 u16 device_id;
538 int reg, rc = -ENODEV; 469 int reg, rc = -ENODEV;
539 470
@@ -543,17 +474,17 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
543 if (rc) 474 if (rc)
544 return rc; 475 return rc;
545 476
546 rc = pci_request_regions(pdev, TLanSignature); 477 rc = pci_request_regions(pdev, tlan_signature);
547 if (rc) { 478 if (rc) {
548 printk(KERN_ERR "TLAN: Could not reserve IO regions\n"); 479 pr_err("Could not reserve IO regions\n");
549 goto err_out; 480 goto err_out;
550 } 481 }
551 } 482 }
552#endif /* CONFIG_PCI */ 483#endif /* CONFIG_PCI */
553 484
554 dev = alloc_etherdev(sizeof(TLanPrivateInfo)); 485 dev = alloc_etherdev(sizeof(struct tlan_priv));
555 if (dev == NULL) { 486 if (dev == NULL) {
556 printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); 487 pr_err("Could not allocate memory for device\n");
557 rc = -ENOMEM; 488 rc = -ENOMEM;
558 goto err_out_regions; 489 goto err_out_regions;
559 } 490 }
@@ -561,38 +492,39 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
561 492
562 priv = netdev_priv(dev); 493 priv = netdev_priv(dev);
563 494
564 priv->pciDev = pdev; 495 priv->pci_dev = pdev;
565 priv->dev = dev; 496 priv->dev = dev;
566 497
567 /* Is this a PCI device? */ 498 /* Is this a PCI device? */
568 if (pdev) { 499 if (pdev) {
569 u32 pci_io_base = 0; 500 u32 pci_io_base = 0;
570 501
571 priv->adapter = &board_info[ent->driver_data]; 502 priv->adapter = &board_info[ent->driver_data];
572 503
573 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 504 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
574 if (rc) { 505 if (rc) {
575 printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); 506 pr_err("No suitable PCI mapping available\n");
576 goto err_out_free_dev; 507 goto err_out_free_dev;
577 } 508 }
578 509
579 for ( reg= 0; reg <= 5; reg ++ ) { 510 for (reg = 0; reg <= 5; reg++) {
580 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { 511 if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
581 pci_io_base = pci_resource_start(pdev, reg); 512 pci_io_base = pci_resource_start(pdev, reg);
582 TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n", 513 TLAN_DBG(TLAN_DEBUG_GNRL,
583 pci_io_base); 514 "IO mapping is available at %x.\n",
515 pci_io_base);
584 break; 516 break;
585 } 517 }
586 } 518 }
587 if (!pci_io_base) { 519 if (!pci_io_base) {
588 printk(KERN_ERR "TLAN: No IO mappings available\n"); 520 pr_err("No IO mappings available\n");
589 rc = -EIO; 521 rc = -EIO;
590 goto err_out_free_dev; 522 goto err_out_free_dev;
591 } 523 }
592 524
593 dev->base_addr = pci_io_base; 525 dev->base_addr = pci_io_base;
594 dev->irq = pdev->irq; 526 dev->irq = pdev->irq;
595 priv->adapterRev = pdev->revision; 527 priv->adapter_rev = pdev->revision;
596 pci_set_master(pdev); 528 pci_set_master(pdev);
597 pci_set_drvdata(pdev, dev); 529 pci_set_drvdata(pdev, dev);
598 530
@@ -602,11 +534,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
602 device_id = inw(ioaddr + EISA_ID2); 534 device_id = inw(ioaddr + EISA_ID2);
603 priv->is_eisa = 1; 535 priv->is_eisa = 1;
604 if (device_id == 0x20F1) { 536 if (device_id == 0x20F1) {
605 priv->adapter = &board_info[13]; /* NetFlex-3/E */ 537 priv->adapter = &board_info[13]; /* NetFlex-3/E */
606 priv->adapterRev = 23; /* TLAN 2.3 */ 538 priv->adapter_rev = 23; /* TLAN 2.3 */
607 } else { 539 } else {
608 priv->adapter = &board_info[14]; 540 priv->adapter = &board_info[14];
609 priv->adapterRev = 10; /* TLAN 1.0 */ 541 priv->adapter_rev = 10; /* TLAN 1.0 */
610 } 542 }
611 dev->base_addr = ioaddr; 543 dev->base_addr = ioaddr;
612 dev->irq = irq; 544 dev->irq = irq;
@@ -620,11 +552,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
620 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 552 priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
621 : (dev->mem_start & 0x18) >> 3; 553 : (dev->mem_start & 0x18) >> 3;
622 554
623 if (priv->speed == 0x1) { 555 if (priv->speed == 0x1)
624 priv->speed = TLAN_SPEED_10; 556 priv->speed = TLAN_SPEED_10;
625 } else if (priv->speed == 0x2) { 557 else if (priv->speed == 0x2)
626 priv->speed = TLAN_SPEED_100; 558 priv->speed = TLAN_SPEED_100;
627 } 559
628 debug = priv->debug = dev->mem_end; 560 debug = priv->debug = dev->mem_end;
629 } else { 561 } else {
630 priv->aui = aui[boards_found]; 562 priv->aui = aui[boards_found];
@@ -635,46 +567,45 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
635 567
636 /* This will be used when we get an adapter error from 568 /* This will be used when we get an adapter error from
637 * within our irq handler */ 569 * within our irq handler */
638 INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); 570 INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
639 571
640 spin_lock_init(&priv->lock); 572 spin_lock_init(&priv->lock);
641 573
642 rc = TLan_Init(dev); 574 rc = tlan_init(dev);
643 if (rc) { 575 if (rc) {
644 printk(KERN_ERR "TLAN: Could not set up device.\n"); 576 pr_err("Could not set up device\n");
645 goto err_out_free_dev; 577 goto err_out_free_dev;
646 } 578 }
647 579
648 rc = register_netdev(dev); 580 rc = register_netdev(dev);
649 if (rc) { 581 if (rc) {
650 printk(KERN_ERR "TLAN: Could not register device.\n"); 582 pr_err("Could not register device\n");
651 goto err_out_uninit; 583 goto err_out_uninit;
652 } 584 }
653 585
654 586
655 TLanDevicesInstalled++; 587 tlan_devices_installed++;
656 boards_found++; 588 boards_found++;
657 589
658 /* pdev is NULL if this is an EISA device */ 590 /* pdev is NULL if this is an EISA device */
659 if (pdev) 591 if (pdev)
660 tlan_have_pci++; 592 tlan_have_pci++;
661 else { 593 else {
662 priv->nextDevice = TLan_Eisa_Devices; 594 priv->next_device = tlan_eisa_devices;
663 TLan_Eisa_Devices = dev; 595 tlan_eisa_devices = dev;
664 tlan_have_eisa++; 596 tlan_have_eisa++;
665 } 597 }
666 598
667 printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n", 599 netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
668 dev->name, 600 (int)dev->irq,
669 (int) dev->irq, 601 (int)dev->base_addr,
670 (int) dev->base_addr, 602 priv->adapter->device_label,
671 priv->adapter->deviceLabel, 603 priv->adapter_rev);
672 priv->adapterRev);
673 return 0; 604 return 0;
674 605
675err_out_uninit: 606err_out_uninit:
676 pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, 607 pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
677 priv->dmaStorageDMA ); 608 priv->dma_storage_dma);
678err_out_free_dev: 609err_out_free_dev:
679 free_netdev(dev); 610 free_netdev(dev);
680err_out_regions: 611err_out_regions:
@@ -689,22 +620,23 @@ err_out:
689} 620}
690 621
691 622
692static void TLan_Eisa_Cleanup(void) 623static void tlan_eisa_cleanup(void)
693{ 624{
694 struct net_device *dev; 625 struct net_device *dev;
695 TLanPrivateInfo *priv; 626 struct tlan_priv *priv;
696 627
697 while( tlan_have_eisa ) { 628 while (tlan_have_eisa) {
698 dev = TLan_Eisa_Devices; 629 dev = tlan_eisa_devices;
699 priv = netdev_priv(dev); 630 priv = netdev_priv(dev);
700 if (priv->dmaStorage) { 631 if (priv->dma_storage) {
701 pci_free_consistent(priv->pciDev, priv->dmaSize, 632 pci_free_consistent(priv->pci_dev, priv->dma_size,
702 priv->dmaStorage, priv->dmaStorageDMA ); 633 priv->dma_storage,
634 priv->dma_storage_dma);
703 } 635 }
704 release_region( dev->base_addr, 0x10); 636 release_region(dev->base_addr, 0x10);
705 unregister_netdev( dev ); 637 unregister_netdev(dev);
706 TLan_Eisa_Devices = priv->nextDevice; 638 tlan_eisa_devices = priv->next_device;
707 free_netdev( dev ); 639 free_netdev(dev);
708 tlan_have_eisa--; 640 tlan_have_eisa--;
709 } 641 }
710} 642}
@@ -715,7 +647,7 @@ static void __exit tlan_exit(void)
715 pci_unregister_driver(&tlan_driver); 647 pci_unregister_driver(&tlan_driver);
716 648
717 if (tlan_have_eisa) 649 if (tlan_have_eisa)
718 TLan_Eisa_Cleanup(); 650 tlan_eisa_cleanup();
719 651
720} 652}
721 653
@@ -726,24 +658,24 @@ module_exit(tlan_exit);
726 658
727 659
728 660
729 /************************************************************** 661/**************************************************************
730 * TLan_EisaProbe 662 * tlan_eisa_probe
731 * 663 *
732 * Returns: 0 on success, 1 otherwise 664 * Returns: 0 on success, 1 otherwise
733 * 665 *
734 * Parms: None 666 * Parms: None
735 * 667 *
736 * 668 *
737 * This function probes for EISA devices and calls 669 * This function probes for EISA devices and calls
738 * TLan_probe1 when one is found. 670 * TLan_probe1 when one is found.
739 * 671 *
740 *************************************************************/ 672 *************************************************************/
741 673
742static void __init TLan_EisaProbe (void) 674static void __init tlan_eisa_probe(void)
743{ 675{
744 long ioaddr; 676 long ioaddr;
745 int rc = -ENODEV; 677 int rc = -ENODEV;
746 int irq; 678 int irq;
747 u16 device_id; 679 u16 device_id;
748 680
749 if (!EISA_bus) { 681 if (!EISA_bus) {
@@ -754,15 +686,16 @@ static void __init TLan_EisaProbe (void)
754 /* Loop through all slots of the EISA bus */ 686 /* Loop through all slots of the EISA bus */
755 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 687 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
756 688
757 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 689 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
758 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 690 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
759 TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 691 TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
760 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 692 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
761 693
762 694
763 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", 695 TLAN_DBG(TLAN_DEBUG_PROBE,
764 (int) ioaddr); 696 "Probing for EISA adapter at IO: 0x%4x : ",
765 if (request_region(ioaddr, 0x10, TLanSignature) == NULL) 697 (int) ioaddr);
698 if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
766 goto out; 699 goto out;
767 700
768 if (inw(ioaddr + EISA_ID) != 0x110E) { 701 if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +705,324 @@ static void __init TLan_EisaProbe (void)
772 705
773 device_id = inw(ioaddr + EISA_ID2); 706 device_id = inw(ioaddr + EISA_ID2);
774 if (device_id != 0x20F1 && device_id != 0x40F1) { 707 if (device_id != 0x20F1 && device_id != 0x40F1) {
775 release_region (ioaddr, 0x10); 708 release_region(ioaddr, 0x10);
776 goto out; 709 goto out;
777 } 710 }
778 711
779 if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */ 712 /* check if adapter is enabled */
780 release_region (ioaddr, 0x10); 713 if (inb(ioaddr + EISA_CR) != 0x1) {
714 release_region(ioaddr, 0x10);
781 goto out2; 715 goto out2;
782 } 716 }
783 717
784 if (debug == 0x10) 718 if (debug == 0x10)
785 printk("Found one\n"); 719 pr_info("Found one\n");
786 720
787 721
788 /* Get irq from board */ 722 /* Get irq from board */
789 switch (inb(ioaddr + 0xCC0)) { 723 switch (inb(ioaddr + 0xcc0)) {
790 case(0x10): 724 case(0x10):
791 irq=5; 725 irq = 5;
792 break; 726 break;
793 case(0x20): 727 case(0x20):
794 irq=9; 728 irq = 9;
795 break; 729 break;
796 case(0x40): 730 case(0x40):
797 irq=10; 731 irq = 10;
798 break; 732 break;
799 case(0x80): 733 case(0x80):
800 irq=11; 734 irq = 11;
801 break; 735 break;
802 default: 736 default:
803 goto out; 737 goto out;
804 } 738 }
805 739
806 740
807 /* Setup the newly found eisa adapter */ 741 /* Setup the newly found eisa adapter */
808 rc = TLan_probe1( NULL, ioaddr, irq, 742 rc = tlan_probe1(NULL, ioaddr, irq,
809 12, NULL); 743 12, NULL);
810 continue; 744 continue;
811 745
812 out: 746out:
813 if (debug == 0x10) 747 if (debug == 0x10)
814 printk("None found\n"); 748 pr_info("None found\n");
815 continue; 749 continue;
816 750
817 out2: if (debug == 0x10) 751out2:
818 printk("Card found but it is not enabled, skipping\n"); 752 if (debug == 0x10)
819 continue; 753 pr_info("Card found but it is not enabled, skipping\n");
754 continue;
820 755
821 } 756 }
822 757
823} /* TLan_EisaProbe */ 758}
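The switch on inb(ioaddr + 0xcc0) in tlan_eisa_probe() above decodes the board's one-hot IRQ-select byte into a Linux IRQ number (0x10 -> 5, 0x20 -> 9, 0x40 -> 10, 0x80 -> 11). As a sketch only, the same mapping can be expressed table-driven; the helper name below is hypothetical and unknown encodings fall back to the same "skip this slot" behaviour as the default case:

/* sketch: same IRQ mapping as the switch in tlan_eisa_probe() */
static const struct {
	unsigned char sel;	/* one-hot value read from ioaddr + 0xcc0 */
	int irq;
} tlan_eisa_irq_map[] = {
	{ 0x10,  5 },
	{ 0x20,  9 },
	{ 0x40, 10 },
	{ 0x80, 11 },
};

static int tlan_eisa_decode_irq(unsigned char sel)	/* hypothetical helper */
{
	unsigned int i;

	for (i = 0; i < sizeof(tlan_eisa_irq_map) / sizeof(tlan_eisa_irq_map[0]); i++)
		if (tlan_eisa_irq_map[i].sel == sel)
			return tlan_eisa_irq_map[i].irq;
	return -1;	/* unknown encoding: caller skips the slot */
}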
824 759
825#ifdef CONFIG_NET_POLL_CONTROLLER 760#ifdef CONFIG_NET_POLL_CONTROLLER
826static void TLan_Poll(struct net_device *dev) 761static void tlan_poll(struct net_device *dev)
827{ 762{
828 disable_irq(dev->irq); 763 disable_irq(dev->irq);
829 TLan_HandleInterrupt(dev->irq, dev); 764 tlan_handle_interrupt(dev->irq, dev);
830 enable_irq(dev->irq); 765 enable_irq(dev->irq);
831} 766}
832#endif 767#endif
833 768
834static const struct net_device_ops TLan_netdev_ops = { 769static const struct net_device_ops tlan_netdev_ops = {
835 .ndo_open = TLan_Open, 770 .ndo_open = tlan_open,
836 .ndo_stop = TLan_Close, 771 .ndo_stop = tlan_close,
837 .ndo_start_xmit = TLan_StartTx, 772 .ndo_start_xmit = tlan_start_tx,
838 .ndo_tx_timeout = TLan_tx_timeout, 773 .ndo_tx_timeout = tlan_tx_timeout,
839 .ndo_get_stats = TLan_GetStats, 774 .ndo_get_stats = tlan_get_stats,
840 .ndo_set_multicast_list = TLan_SetMulticastList, 775 .ndo_set_multicast_list = tlan_set_multicast_list,
841 .ndo_do_ioctl = TLan_ioctl, 776 .ndo_do_ioctl = tlan_ioctl,
842 .ndo_change_mtu = eth_change_mtu, 777 .ndo_change_mtu = eth_change_mtu,
843 .ndo_set_mac_address = eth_mac_addr, 778 .ndo_set_mac_address = eth_mac_addr,
844 .ndo_validate_addr = eth_validate_addr, 779 .ndo_validate_addr = eth_validate_addr,
845#ifdef CONFIG_NET_POLL_CONTROLLER 780#ifdef CONFIG_NET_POLL_CONTROLLER
846 .ndo_poll_controller = TLan_Poll, 781 .ndo_poll_controller = tlan_poll,
847#endif 782#endif
848}; 783};
849 784
850 785
851 786
852 /*************************************************************** 787/***************************************************************
853 * TLan_Init 788 * tlan_init
854 * 789 *
855 * Returns: 790 * Returns:
856 * 0 on success, error code otherwise. 791 * 0 on success, error code otherwise.
857 * Parms: 792 * Parms:
858 * dev The structure of the device to be 793 * dev The structure of the device to be
859 * init'ed. 794 * init'ed.
860 * 795 *
861 * This function completes the initialization of the 796 * This function completes the initialization of the
862 * device structure and driver. It reserves the IO 797 * device structure and driver. It reserves the IO
863 * addresses, allocates memory for the lists and bounce 798 * addresses, allocates memory for the lists and bounce
864 * buffers, retrieves the MAC address from the eeprom 799 * and assigns the device's methods.
865 * and assigns the device's methods. 800 *
866 * 801 *
867 **************************************************************/ 802 **************************************************************/
868 803
869static int TLan_Init( struct net_device *dev ) 804static int tlan_init(struct net_device *dev)
870{ 805{
871 int dma_size; 806 int dma_size;
872 int err; 807 int err;
873 int i; 808 int i;
874 TLanPrivateInfo *priv; 809 struct tlan_priv *priv;
875 810
876 priv = netdev_priv(dev); 811 priv = netdev_priv(dev);
877 812
878 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) 813 dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
879 * ( sizeof(TLanList) ); 814 * (sizeof(struct tlan_list));
880 priv->dmaStorage = pci_alloc_consistent(priv->pciDev, 815 priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
881 dma_size, &priv->dmaStorageDMA); 816 dma_size,
882 priv->dmaSize = dma_size; 817 &priv->dma_storage_dma);
818 priv->dma_size = dma_size;
883 819
884 if ( priv->dmaStorage == NULL ) { 820 if (priv->dma_storage == NULL) {
885 printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n", 821 pr_err("Could not allocate lists and buffers for %s\n",
886 dev->name ); 822 dev->name);
887 return -ENOMEM; 823 return -ENOMEM;
888 } 824 }
889 memset( priv->dmaStorage, 0, dma_size ); 825 memset(priv->dma_storage, 0, dma_size);
890 priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8); 826 priv->rx_list = (struct tlan_list *)
891 priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8); 827 ALIGN((unsigned long)priv->dma_storage, 8);
892 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; 828 priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
893 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; 829 priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
830 priv->tx_list_dma =
831 priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
894 832
895 err = 0; 833 err = 0;
896 for ( i = 0; i < 6 ; i++ ) 834 for (i = 0; i < 6 ; i++)
897 err |= TLan_EeReadByte( dev, 835 err |= tlan_ee_read_byte(dev,
898 (u8) priv->adapter->addrOfs + i, 836 (u8) priv->adapter->addr_ofs + i,
899 (u8 *) &dev->dev_addr[i] ); 837 (u8 *) &dev->dev_addr[i]);
900 if ( err ) { 838 if (err) {
901 printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n", 839 pr_err("%s: Error reading MAC from eeprom: %d\n",
902 dev->name, 840 dev->name, err);
903 err );
904 } 841 }
905 dev->addr_len = 6; 842 dev->addr_len = 6;
906 843
907 netif_carrier_off(dev); 844 netif_carrier_off(dev);
908 845
909 /* Device methods */ 846 /* Device methods */
910 dev->netdev_ops = &TLan_netdev_ops; 847 dev->netdev_ops = &tlan_netdev_ops;
911 dev->watchdog_timeo = TX_TIMEOUT; 848 dev->watchdog_timeo = TX_TIMEOUT;
912 849
913 return 0; 850 return 0;
914 851
915} /* TLan_Init */ 852}
916 853
917 854
918 855
919 856
920 /*************************************************************** 857/***************************************************************
921 * TLan_Open 858 * tlan_open
922 * 859 *
923 * Returns: 860 * Returns:
924 * 0 on success, error code otherwise. 861 * 0 on success, error code otherwise.
925 * Parms: 862 * Parms:
926 * dev Structure of device to be opened. 863 * dev Structure of device to be opened.
927 * 864 *
928 * This routine puts the driver and TLAN adapter in a 865 * This routine puts the driver and TLAN adapter in a
929 * state where it is ready to send and receive packets. 866 * state where it is ready to send and receive packets.
930 * It allocates the IRQ, resets and brings the adapter 867 * It allocates the IRQ, resets and brings the adapter
931 * out of reset, and allows interrupts. It also delays 868 * out of reset, and allows interrupts. It also delays
932 * the startup for autonegotiation or sends a Rx GO 869 * the startup for autonegotiation or sends a Rx GO
933 * command to the adapter, as appropriate. 870 * command to the adapter, as appropriate.
934 * 871 *
935 **************************************************************/ 872 **************************************************************/
936 873
937static int TLan_Open( struct net_device *dev ) 874static int tlan_open(struct net_device *dev)
938{ 875{
939 TLanPrivateInfo *priv = netdev_priv(dev); 876 struct tlan_priv *priv = netdev_priv(dev);
940 int err; 877 int err;
941 878
942 priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); 879 priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
943 err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, 880 err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
944 dev->name, dev ); 881 dev->name, dev);
945 882
946 if ( err ) { 883 if (err) {
947 pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n", 884 netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
948 dev->name, dev->irq ); 885 dev->irq);
949 return err; 886 return err;
950 } 887 }
951 888
952 init_timer(&priv->timer); 889 init_timer(&priv->timer);
953 netif_start_queue(dev);
954 890
955 /* NOTE: It might not be necessary to read the stats before a 891 tlan_start(dev);
956 reset if you don't care what the values are.
957 */
958 TLan_ResetLists( dev );
959 TLan_ReadAndClearStats( dev, TLAN_IGNORE );
960 TLan_ResetAdapter( dev );
961 892
962 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", 893 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
963 dev->name, priv->tlanRev ); 894 dev->name, priv->tlan_rev);
964 895
965 return 0; 896 return 0;
966 897
967} /* TLan_Open */ 898}
968 899
969 900
970 901
971 /************************************************************** 902/**************************************************************
972 * TLan_ioctl 903 * tlan_ioctl
973 * 904 *
974 * Returns: 905 * Returns:
975 * 0 on success, error code otherwise 906 * 0 on success, error code otherwise
976 * Params: 907 * Params:
977 * dev structure of device to receive ioctl. 908 * dev structure of device to receive ioctl.
978 * 909 *
979 * rq ifreq structure to hold userspace data. 910 * rq ifreq structure to hold userspace data.
980 * 911 *
981 * cmd ioctl command. 912 * cmd ioctl command.
982 * 913 *
983 * 914 *
984 *************************************************************/ 915 *************************************************************/
985 916
986static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 917static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
987{ 918{
988 TLanPrivateInfo *priv = netdev_priv(dev); 919 struct tlan_priv *priv = netdev_priv(dev);
989 struct mii_ioctl_data *data = if_mii(rq); 920 struct mii_ioctl_data *data = if_mii(rq);
990 u32 phy = priv->phy[priv->phyNum]; 921 u32 phy = priv->phy[priv->phy_num];
991 922
992 if (!priv->phyOnline) 923 if (!priv->phy_online)
993 return -EAGAIN; 924 return -EAGAIN;
994 925
995 switch(cmd) { 926 switch (cmd) {
996 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 927 case SIOCGMIIPHY: /* get address of MII PHY in use. */
997 data->phy_id = phy; 928 data->phy_id = phy;
998 929
999 930
1000 case SIOCGMIIREG: /* Read MII PHY register. */ 931 case SIOCGMIIREG: /* read MII PHY register. */
1001 TLan_MiiReadReg(dev, data->phy_id & 0x1f, 932 tlan_mii_read_reg(dev, data->phy_id & 0x1f,
1002 data->reg_num & 0x1f, &data->val_out); 933 data->reg_num & 0x1f, &data->val_out);
1003 return 0; 934 return 0;
1004 935
1005 936
1006 case SIOCSMIIREG: /* Write MII PHY register. */ 937 case SIOCSMIIREG: /* write MII PHY register. */
1007 TLan_MiiWriteReg(dev, data->phy_id & 0x1f, 938 tlan_mii_write_reg(dev, data->phy_id & 0x1f,
1008 data->reg_num & 0x1f, data->val_in); 939 data->reg_num & 0x1f, data->val_in);
1009 return 0; 940 return 0;
1010 default: 941 default:
1011 return -EOPNOTSUPP; 942 return -EOPNOTSUPP;
1012 } 943 }
1013} /* tlan_ioctl */ 944}
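For reference, the SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG commands handled by tlan_ioctl() are the standard MII ioctls and are normally driven from user space through an AF_INET socket and a struct ifreq. The sketch below shows one common way to read a PHY status register; it illustrates the generic ioctl interface rather than anything TLAN-specific, "eth0" is a placeholder interface name, and most error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder interface */

	/* ask the driver which PHY it is using ... */
	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
		mii->reg_num = MII_BMSR;		/* ... then read its status register */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("phy %u BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
	}
	close(fd);
	return 0;
}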
1014 945
1015 946
1016 /*************************************************************** 947/***************************************************************
1017 * TLan_tx_timeout 948 * tlan_tx_timeout
1018 * 949 *
1019 * Returns: nothing 950 * Returns: nothing
1020 * 951 *
1021 * Params: 952 * Params:
1022 * dev structure of device which timed out 953 * dev structure of device which timed out
1023 * during transmit. 954 * during transmit.
1024 * 955 *
1025 **************************************************************/ 956 **************************************************************/
1026 957
1027static void TLan_tx_timeout(struct net_device *dev) 958static void tlan_tx_timeout(struct net_device *dev)
1028{ 959{
1029 960
1030 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); 961 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
1031 962
1032 /* Ok so we timed out, let's see what we can do about it... */ 963 /* Ok so we timed out, let's see what we can do about it... */
1033 TLan_FreeLists( dev ); 964 tlan_free_lists(dev);
1034 TLan_ResetLists( dev ); 965 tlan_reset_lists(dev);
1035 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 966 tlan_read_and_clear_stats(dev, TLAN_IGNORE);
1036 TLan_ResetAdapter( dev ); 967 tlan_reset_adapter(dev);
1037 dev->trans_start = jiffies; /* prevent tx timeout */ 968 dev->trans_start = jiffies; /* prevent tx timeout */
1038 netif_wake_queue( dev ); 969 netif_wake_queue(dev);
1039 970
1040} 971}
1041 972
1042 973
1043 /*************************************************************** 974/***************************************************************
1044 * TLan_tx_timeout_work 975 * tlan_tx_timeout_work
1045 * 976 *
1046 * Returns: nothing 977 * Returns: nothing
1047 * 978 *
1048 * Params: 979 * Params:
1049 * work work item of device which timed out 980 * work work item of device which timed out
1050 * 981 *
1051 **************************************************************/ 982 **************************************************************/
1052 983
1053static void TLan_tx_timeout_work(struct work_struct *work) 984static void tlan_tx_timeout_work(struct work_struct *work)
1054{ 985{
1055 TLanPrivateInfo *priv = 986 struct tlan_priv *priv =
1056 container_of(work, TLanPrivateInfo, tlan_tqueue); 987 container_of(work, struct tlan_priv, tlan_tqueue);
1057 988
1058 TLan_tx_timeout(priv->dev); 989 tlan_tx_timeout(priv->dev);
1059} 990}
1060 991
1061 992
1062 993
1063 /*************************************************************** 994/***************************************************************
1064 * TLan_StartTx 995 * tlan_start_tx
1065 * 996 *
1066 * Returns: 997 * Returns:
1067 * 0 on success, non-zero on failure. 998 * 0 on success, non-zero on failure.
1068 * Parms: 999 * Parms:
1069 * skb A pointer to the sk_buff containing the 1000 * skb A pointer to the sk_buff containing the
1070 * frame to be sent. 1001 * frame to be sent.
1071 * dev The device to send the data on. 1002 * dev The device to send the data on.
1072 * 1003 *
1073 * This function adds a frame to the Tx list to be sent 1004 * This function adds a frame to the Tx list to be sent
1074 * ASAP. First it verifies that the adapter is ready and 1005 * ASAP. First it verifies that the adapter is ready and
1075 * there is room in the queue. Then it sets up the next 1006 * there is room in the queue. Then it sets up the next
1076 * available list, copies the frame to the corresponding 1007 * available list, copies the frame to the corresponding
1077 * buffer. If the adapter Tx channel is idle, it gives 1008 * buffer. If the adapter Tx channel is idle, it gives
1078 * the adapter a Tx Go command on the list, otherwise it 1009 * the adapter a Tx Go command on the list, otherwise it
1079 * sets the forward address of the previous list to point 1010 * sets the forward address of the previous list to point
1080 * to this one. Then it frees the sk_buff. 1011 * to this one. Then it frees the sk_buff.
1081 * 1012 *
1082 **************************************************************/ 1013 **************************************************************/
1083 1014
1084static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) 1015static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
1085{ 1016{
1086 TLanPrivateInfo *priv = netdev_priv(dev); 1017 struct tlan_priv *priv = netdev_priv(dev);
1087 dma_addr_t tail_list_phys; 1018 dma_addr_t tail_list_phys;
1088 TLanList *tail_list; 1019 struct tlan_list *tail_list;
1089 unsigned long flags; 1020 unsigned long flags;
1090 unsigned int txlen; 1021 unsigned int txlen;
1091 1022
1092 if ( ! priv->phyOnline ) { 1023 if (!priv->phy_online) {
1093 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", 1024 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
1094 dev->name ); 1025 dev->name);
1095 dev_kfree_skb_any(skb); 1026 dev_kfree_skb_any(skb);
1096 return NETDEV_TX_OK; 1027 return NETDEV_TX_OK;
1097 } 1028 }
@@ -1100,218 +1031,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1100 return NETDEV_TX_OK; 1031 return NETDEV_TX_OK;
1101 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); 1032 txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
1102 1033
1103 tail_list = priv->txList + priv->txTail; 1034 tail_list = priv->tx_list + priv->tx_tail;
1104 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1035 tail_list_phys =
1036 priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
1105 1037
1106 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1038 if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
1107 TLAN_DBG( TLAN_DEBUG_TX, 1039 TLAN_DBG(TLAN_DEBUG_TX,
1108 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", 1040 "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
1109 dev->name, priv->txHead, priv->txTail ); 1041 dev->name, priv->tx_head, priv->tx_tail);
1110 netif_stop_queue(dev); 1042 netif_stop_queue(dev);
1111 priv->txBusyCount++; 1043 priv->tx_busy_count++;
1112 return NETDEV_TX_BUSY; 1044 return NETDEV_TX_BUSY;
1113 } 1045 }
1114 1046
1115 tail_list->forward = 0; 1047 tail_list->forward = 0;
1116 1048
1117 tail_list->buffer[0].address = pci_map_single(priv->pciDev, 1049 tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
1118 skb->data, txlen, 1050 skb->data, txlen,
1119 PCI_DMA_TODEVICE); 1051 PCI_DMA_TODEVICE);
1120 TLan_StoreSKB(tail_list, skb); 1052 tlan_store_skb(tail_list, skb);
1121 1053
1122 tail_list->frameSize = (u16) txlen; 1054 tail_list->frame_size = (u16) txlen;
1123 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; 1055 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
1124 tail_list->buffer[1].count = 0; 1056 tail_list->buffer[1].count = 0;
1125 tail_list->buffer[1].address = 0; 1057 tail_list->buffer[1].address = 0;
1126 1058
1127 spin_lock_irqsave(&priv->lock, flags); 1059 spin_lock_irqsave(&priv->lock, flags);
1128 tail_list->cStat = TLAN_CSTAT_READY; 1060 tail_list->c_stat = TLAN_CSTAT_READY;
1129 if ( ! priv->txInProgress ) { 1061 if (!priv->tx_in_progress) {
1130 priv->txInProgress = 1; 1062 priv->tx_in_progress = 1;
1131 TLAN_DBG( TLAN_DEBUG_TX, 1063 TLAN_DBG(TLAN_DEBUG_TX,
1132 "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1064 "TRANSMIT: Starting TX on buffer %d\n",
1133 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1065 priv->tx_tail);
1134 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1066 outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
1067 outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
1135 } else { 1068 } else {
1136 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", 1069 TLAN_DBG(TLAN_DEBUG_TX,
1137 priv->txTail ); 1070 "TRANSMIT: Adding buffer %d to TX channel\n",
1138 if ( priv->txTail == 0 ) { 1071 priv->tx_tail);
1139 ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward 1072 if (priv->tx_tail == 0) {
1073 (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
1140 = tail_list_phys; 1074 = tail_list_phys;
1141 } else { 1075 } else {
1142 ( priv->txList + ( priv->txTail - 1 ) )->forward 1076 (priv->tx_list + (priv->tx_tail - 1))->forward
1143 = tail_list_phys; 1077 = tail_list_phys;
1144 } 1078 }
1145 } 1079 }
1146 spin_unlock_irqrestore(&priv->lock, flags); 1080 spin_unlock_irqrestore(&priv->lock, flags);
1147 1081
1148 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); 1082 CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
1149 1083
1150 return NETDEV_TX_OK; 1084 return NETDEV_TX_OK;
1151 1085
1152} /* TLan_StartTx */ 1086}
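CIRC_INC() and the list counts used by tlan_start_tx() come from tlan.h, which is outside this hunk: the transmit path advances tx_tail around a fixed ring of tlan_list descriptors while tlan_handle_tx_eof() later advances tx_head behind it. A minimal sketch of that bookkeeping follows, assuming the macro simply wraps an index at the ring size; the ring-size value and names below are placeholders, not the driver's definitions:

/* sketch of the ring-index bookkeeping used by the Tx path; the real
 * CIRC_INC() and TLAN_NUM_TX_LISTS live in tlan.h and may differ */
#define DEMO_NUM_TX_LISTS 64			/* placeholder ring size */
#define DEMO_CIRC_INC(idx, size) do { if (++(idx) >= (size)) (idx) = 0; } while (0)

static unsigned int demo_tx_tail;	/* next descriptor to hand to the chip */
static unsigned int demo_tx_head;	/* oldest descriptor still owned by the chip */

static void demo_queue_frame(void)
{
	/* as in tlan_start_tx(): fill the descriptor at tx_tail, then advance */
	DEMO_CIRC_INC(demo_tx_tail, DEMO_NUM_TX_LISTS);
}

static void demo_complete_frame(void)
{
	/* as in tlan_handle_tx_eof(): retire the descriptor at tx_head, then advance */
	DEMO_CIRC_INC(demo_tx_head, DEMO_NUM_TX_LISTS);
}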
1153 1087
1154 1088
1155 1089
1156 1090
1157 /*************************************************************** 1091/***************************************************************
1158 * TLan_HandleInterrupt 1092 * tlan_handle_interrupt
1159 * 1093 *
1160 * Returns: 1094 * Returns:
1161 * Nothing 1095 * Nothing
1162 * Parms: 1096 * Parms:
1163 * irq The line on which the interrupt 1097 * irq The line on which the interrupt
1164 * occurred. 1098 * occurred.
1165 * dev_id A pointer to the device assigned to 1099 * dev_id A pointer to the device assigned to
1166 * this irq line. 1100 * this irq line.
1167 * 1101 *
1168 * This function handles an interrupt generated by its 1102 * This function handles an interrupt generated by its
1169 * assigned TLAN adapter. The function deactivates 1103 * assigned TLAN adapter. The function deactivates
1170 * interrupts on its adapter, records the type of 1104 * interrupts on its adapter, records the type of
1171 * interrupt, executes the appropriate subhandler, and 1105 * interrupt, executes the appropriate subhandler, and
1172 * acknowledges the interrupt to the adapter (thus 1106 * acknowledges the interrupt to the adapter (thus
1173 * re-enabling adapter interrupts). 1107 * re-enabling adapter interrupts).
1174 * 1108 *
1175 **************************************************************/ 1109 **************************************************************/
1176 1110
1177static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) 1111static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
1178{ 1112{
1179 struct net_device *dev = dev_id; 1113 struct net_device *dev = dev_id;
1180 TLanPrivateInfo *priv = netdev_priv(dev); 1114 struct tlan_priv *priv = netdev_priv(dev);
1181 u16 host_int; 1115 u16 host_int;
1182 u16 type; 1116 u16 type;
1183 1117
1184 spin_lock(&priv->lock); 1118 spin_lock(&priv->lock);
1185 1119
1186 host_int = inw( dev->base_addr + TLAN_HOST_INT ); 1120 host_int = inw(dev->base_addr + TLAN_HOST_INT);
1187 type = ( host_int & TLAN_HI_IT_MASK ) >> 2; 1121 type = (host_int & TLAN_HI_IT_MASK) >> 2;
1188 if ( type ) { 1122 if (type) {
1189 u32 ack; 1123 u32 ack;
1190 u32 host_cmd; 1124 u32 host_cmd;
1191 1125
1192 outw( host_int, dev->base_addr + TLAN_HOST_INT ); 1126 outw(host_int, dev->base_addr + TLAN_HOST_INT);
1193 ack = TLanIntVector[type]( dev, host_int ); 1127 ack = tlan_int_vector[type](dev, host_int);
1194 1128
1195 if ( ack ) { 1129 if (ack) {
1196 host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); 1130 host_cmd = TLAN_HC_ACK | ack | (type << 18);
1197 outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); 1131 outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
1198 } 1132 }
1199 } 1133 }
1200 1134
1201 spin_unlock(&priv->lock); 1135 spin_unlock(&priv->lock);
1202 1136
1203 return IRQ_RETVAL(type); 1137 return IRQ_RETVAL(type);
1204} /* TLan_HandleInterrupts */ 1138}
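tlan_int_vector[] is declared earlier in the file, outside this hunk; tlan_handle_interrupt() only extracts the interrupt type from HOST_INT and calls through that function-pointer table, then acknowledges whatever the sub-handler returns. A reduced, self-contained model of that dispatch pattern is shown below; the handler names, table order, and mask value are illustrative, not the driver's actual definitions:

#include <stdio.h>

typedef unsigned int u32;
typedef unsigned short u16;

/* stand-in sub-handlers: each returns a non-zero ack value, as the driver's do */
static u32 demo_invalid(u16 host_int) { (void)host_int; return 0; }
static u32 demo_tx_eof(u16 host_int)  { (void)host_int; return 1; }
static u32 demo_rx_eof(u16 host_int)  { (void)host_int; return 1; }

static u32 (*demo_vector[])(u16) = {
	demo_invalid,	/* type 0 */
	demo_tx_eof,	/* type 1 */
	demo_rx_eof,	/* type 2 -- ordering illustrative only */
};

static u32 demo_dispatch(u16 host_int)
{
	u16 type = (host_int & 0x001c) >> 2;	/* type field; mask value illustrative */

	if (type >= sizeof(demo_vector) / sizeof(demo_vector[0]))
		return 0;
	return demo_vector[type](host_int);	/* non-zero result becomes the ACK */
}

int main(void)
{
	printf("ack for type-1 interrupt: %u\n", demo_dispatch(1 << 2));
	return 0;
}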
1205 1139
1206 1140
1207 1141
1208 1142
1209 /*************************************************************** 1143/***************************************************************
1210 * TLan_Close 1144 * tlan_close
1211 * 1145 *
1212 * Returns: 1146 * Returns:
1213 * An error code. 1147 * An error code.
1214 * Parms: 1148 * Parms:
1215 * dev The device structure of the device to 1149 * dev The device structure of the device to
1216 * close. 1150 * close.
1217 * 1151 *
1218 * This function shuts down the adapter. It records any 1152 * This function shuts down the adapter. It records any
1219 * stats, puts the adapter into reset state, deactivates 1153 * stats, puts the adapter into reset state, deactivates
1220 * its timer as needed, and frees the irq it is using. 1154 * its timer as needed, and frees the irq it is using.
1221 * 1155 *
1222 **************************************************************/ 1156 **************************************************************/
1223 1157
1224static int TLan_Close(struct net_device *dev) 1158static int tlan_close(struct net_device *dev)
1225{ 1159{
1226 TLanPrivateInfo *priv = netdev_priv(dev); 1160 struct tlan_priv *priv = netdev_priv(dev);
1227 1161
1228 netif_stop_queue(dev);
1229 priv->neg_be_verbose = 0; 1162 priv->neg_be_verbose = 0;
1163 tlan_stop(dev);
1230 1164
1231 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1165 free_irq(dev->irq, dev);
1232 outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); 1166 tlan_free_lists(dev);
1233 if ( priv->timer.function != NULL ) { 1167 TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
1234 del_timer_sync( &priv->timer );
1235 priv->timer.function = NULL;
1236 }
1237
1238 free_irq( dev->irq, dev );
1239 TLan_FreeLists( dev );
1240 TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
1241 1168
1242 return 0; 1169 return 0;
1243 1170
1244} /* TLan_Close */ 1171}
1245 1172
1246 1173
1247 1174
1248 1175
1249 /*************************************************************** 1176/***************************************************************
1250 * TLan_GetStats 1177 * tlan_get_stats
1251 * 1178 *
1252 * Returns: 1179 * Returns:
1253 * A pointer to the device's statistics structure. 1180 * A pointer to the device's statistics structure.
1254 * Parms: 1181 * Parms:
1255 * dev The device structure to return the 1182 * dev The device structure to return the
1256 * stats for. 1183 * stats for.
1257 * 1184 *
1258 * This function updates the devices statistics by reading 1185 * This function updates the devices statistics by reading
1259 * the TLAN chip's onboard registers. Then it returns the 1186 * the TLAN chip's onboard registers. Then it returns the
1260 * address of the statistics structure. 1187 * address of the statistics structure.
1261 * 1188 *
1262 **************************************************************/ 1189 **************************************************************/
1263 1190
1264static struct net_device_stats *TLan_GetStats( struct net_device *dev ) 1191static struct net_device_stats *tlan_get_stats(struct net_device *dev)
1265{ 1192{
1266 TLanPrivateInfo *priv = netdev_priv(dev); 1193 struct tlan_priv *priv = netdev_priv(dev);
1267 int i; 1194 int i;
1268 1195
1269 /* Should only read stats if open ? */ 1196 /* Should only read stats if open ? */
1270 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1197 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1271 1198
1272 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, 1199 TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
1273 priv->rxEocCount ); 1200 priv->rx_eoc_count);
1274 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, 1201 TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
1275 priv->txBusyCount ); 1202 priv->tx_busy_count);
1276 if ( debug & TLAN_DEBUG_GNRL ) { 1203 if (debug & TLAN_DEBUG_GNRL) {
1277 TLan_PrintDio( dev->base_addr ); 1204 tlan_print_dio(dev->base_addr);
1278 TLan_PhyPrint( dev ); 1205 tlan_phy_print(dev);
1279 } 1206 }
1280 if ( debug & TLAN_DEBUG_LIST ) { 1207 if (debug & TLAN_DEBUG_LIST) {
1281 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) 1208 for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
1282 TLan_PrintList( priv->rxList + i, "RX", i ); 1209 tlan_print_list(priv->rx_list + i, "RX", i);
1283 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) 1210 for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
1284 TLan_PrintList( priv->txList + i, "TX", i ); 1211 tlan_print_list(priv->tx_list + i, "TX", i);
1285 } 1212 }
1286 1213
1287 return &dev->stats; 1214 return &dev->stats;
1288 1215
1289} /* TLan_GetStats */ 1216}
1290 1217
1291 1218
1292 1219
1293 1220
1294 /*************************************************************** 1221/***************************************************************
1295 * TLan_SetMulticastList 1222 * tlan_set_multicast_list
1296 * 1223 *
1297 * Returns: 1224 * Returns:
1298 * Nothing 1225 * Nothing
1299 * Parms: 1226 * Parms:
1300 * dev The device structure to set the 1227 * dev The device structure to set the
1301 * multicast list for. 1228 * multicast list for.
1302 * 1229 *
1303 * This function sets the TLAN adaptor to various receive 1230 * This function sets the TLAN adaptor to various receive
1304 * modes. If the IFF_PROMISC flag is set, promiscuous 1231 * modes. If the IFF_PROMISC flag is set, promiscuous
1305 * mode is activated. Otherwise, promiscuous mode is 1232 * mode is activated. Otherwise, promiscuous mode is
1306 * turned off. If the IFF_ALLMULTI flag is set, then 1233 * turned off. If the IFF_ALLMULTI flag is set, then
1307 * the hash table is set to receive all group addresses. 1234 * the hash table is set to receive all group addresses.
1308 * Otherwise, the first three multicast addresses are 1235 * Otherwise, the first three multicast addresses are
1309 * stored in AREG_1-3, and the rest are selected via the 1236 * stored in AREG_1-3, and the rest are selected via the
1310 * hash table, as necessary. 1237 * hash table, as necessary.
1311 * 1238 *
1312 **************************************************************/ 1239 **************************************************************/
1313 1240
1314static void TLan_SetMulticastList( struct net_device *dev ) 1241static void tlan_set_multicast_list(struct net_device *dev)
1315{ 1242{
1316 struct netdev_hw_addr *ha; 1243 struct netdev_hw_addr *ha;
1317 u32 hash1 = 0; 1244 u32 hash1 = 0;
@@ -1320,53 +1247,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
1320 u32 offset; 1247 u32 offset;
1321 u8 tmp; 1248 u8 tmp;
1322 1249
1323 if ( dev->flags & IFF_PROMISC ) { 1250 if (dev->flags & IFF_PROMISC) {
1324 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1251 tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
1325 TLan_DioWrite8( dev->base_addr, 1252 tlan_dio_write8(dev->base_addr,
1326 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1253 TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
1327 } else { 1254 } else {
1328 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1255 tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
1329 TLan_DioWrite8( dev->base_addr, 1256 tlan_dio_write8(dev->base_addr,
1330 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1257 TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
1331 if ( dev->flags & IFF_ALLMULTI ) { 1258 if (dev->flags & IFF_ALLMULTI) {
1332 for ( i = 0; i < 3; i++ ) 1259 for (i = 0; i < 3; i++)
1333 TLan_SetMac( dev, i + 1, NULL ); 1260 tlan_set_mac(dev, i + 1, NULL);
1334 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF ); 1261 tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
1335 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); 1262 0xffffffff);
1263 tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
1264 0xffffffff);
1336 } else { 1265 } else {
1337 i = 0; 1266 i = 0;
1338 netdev_for_each_mc_addr(ha, dev) { 1267 netdev_for_each_mc_addr(ha, dev) {
1339 if ( i < 3 ) { 1268 if (i < 3) {
1340 TLan_SetMac( dev, i + 1, 1269 tlan_set_mac(dev, i + 1,
1341 (char *) &ha->addr); 1270 (char *) &ha->addr);
1342 } else { 1271 } else {
1343 offset = TLan_HashFunc((u8 *)&ha->addr); 1272 offset =
1344 if ( offset < 32 ) 1273 tlan_hash_func((u8 *)&ha->addr);
1345 hash1 |= ( 1 << offset ); 1274 if (offset < 32)
1275 hash1 |= (1 << offset);
1346 else 1276 else
1347 hash2 |= ( 1 << ( offset - 32 ) ); 1277 hash2 |= (1 << (offset - 32));
1348 } 1278 }
1349 i++; 1279 i++;
1350 } 1280 }
1351 for ( ; i < 3; i++ ) 1281 for ( ; i < 3; i++)
1352 TLan_SetMac( dev, i + 1, NULL ); 1282 tlan_set_mac(dev, i + 1, NULL);
1353 TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 ); 1283 tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
1354 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 ); 1284 tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
1355 } 1285 }
1356 } 1286 }
1357 1287
1358} /* TLan_SetMulticastList */ 1288}
1359 1289
1360 1290
1361 1291
1362/***************************************************************************** 1292/*****************************************************************************
1363****************************************************************************** 1293******************************************************************************
1364 1294
1365 ThunderLAN Driver Interrupt Vectors and Table 1295ThunderLAN driver interrupt vectors and table
1366 1296
1367 Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN 1297please see chap. 4, "Interrupt Handling" of the "ThunderLAN
1368 Programmer's Guide" for more information on handling interrupts 1298Programmer's Guide" for more information on handling interrupts
1369 generated by TLAN based adapters. 1299generated by TLAN based adapters.
1370 1300
1371****************************************************************************** 1301******************************************************************************
1372*****************************************************************************/ 1302*****************************************************************************/
@@ -1374,46 +1304,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
1374 1304
1375 1305
1376 1306
1377 /*************************************************************** 1307/***************************************************************
1378 * TLan_HandleTxEOF 1308 * tlan_handle_tx_eof
1379 * 1309 *
1380 * Returns: 1310 * Returns:
1381 * 1 1311 * 1
1382 * Parms: 1312 * Parms:
1383 * dev Device assigned the IRQ that was 1313 * dev Device assigned the IRQ that was
1384 * raised. 1314 * raised.
1385 * host_int The contents of the HOST_INT 1315 * host_int The contents of the HOST_INT
1386 * port. 1316 * port.
1387 * 1317 *
1388 * This function handles Tx EOF interrupts which are raised 1318 * This function handles Tx EOF interrupts which are raised
1389 * by the adapter when it has completed sending the 1319 * by the adapter when it has completed sending the
1390 * contents of a buffer. It determines which list/buffer 1320 * contents of a buffer. It determines which list/buffer
1391 * was completed and resets it. If the buffer was the last 1321 * was completed and resets it. If the buffer was the last
1392 * in the channel (EOC), then the function checks to see if 1322 * in the channel (EOC), then the function checks to see if
1393 * another buffer is ready to send, and if so, sends a Tx 1323 * another buffer is ready to send, and if so, sends a Tx
1394 * Go command. Finally, the driver activates/continues the 1324 * Go command. Finally, the driver activates/continues the
1395 * activity LED. 1325 * activity LED.
1396 * 1326 *
1397 **************************************************************/ 1327 **************************************************************/
1398 1328
1399static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) 1329static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
1400{ 1330{
1401 TLanPrivateInfo *priv = netdev_priv(dev); 1331 struct tlan_priv *priv = netdev_priv(dev);
1402 int eoc = 0; 1332 int eoc = 0;
1403 TLanList *head_list; 1333 struct tlan_list *head_list;
1404 dma_addr_t head_list_phys; 1334 dma_addr_t head_list_phys;
1405 u32 ack = 0; 1335 u32 ack = 0;
1406 u16 tmpCStat; 1336 u16 tmp_c_stat;
1407 1337
1408 TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", 1338 TLAN_DBG(TLAN_DEBUG_TX,
1409 priv->txHead, priv->txTail ); 1339 "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
1410 head_list = priv->txList + priv->txHead; 1340 priv->tx_head, priv->tx_tail);
1341 head_list = priv->tx_list + priv->tx_head;
1411 1342
1412 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1343 while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
1413 struct sk_buff *skb = TLan_GetSKB(head_list); 1344 && (ack < 255)) {
1345 struct sk_buff *skb = tlan_get_skb(head_list);
1414 1346
1415 ack++; 1347 ack++;
1416 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, 1348 pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
1417 max(skb->len, 1349 max(skb->len,
1418 (unsigned int)TLAN_MIN_FRAME_SIZE), 1350 (unsigned int)TLAN_MIN_FRAME_SIZE),
1419 PCI_DMA_TODEVICE); 1351 PCI_DMA_TODEVICE);
@@ -1421,304 +1353,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1421 head_list->buffer[8].address = 0; 1353 head_list->buffer[8].address = 0;
1422 head_list->buffer[9].address = 0; 1354 head_list->buffer[9].address = 0;
1423 1355
1424 if ( tmpCStat & TLAN_CSTAT_EOC ) 1356 if (tmp_c_stat & TLAN_CSTAT_EOC)
1425 eoc = 1; 1357 eoc = 1;
1426 1358
1427 dev->stats.tx_bytes += head_list->frameSize; 1359 dev->stats.tx_bytes += head_list->frame_size;
1428 1360
1429 head_list->cStat = TLAN_CSTAT_UNUSED; 1361 head_list->c_stat = TLAN_CSTAT_UNUSED;
1430 netif_start_queue(dev); 1362 netif_start_queue(dev);
1431 CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS ); 1363 CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
1432 head_list = priv->txList + priv->txHead; 1364 head_list = priv->tx_list + priv->tx_head;
1433 } 1365 }
1434 1366
1435 if (!ack) 1367 if (!ack)
1436 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); 1368 netdev_info(dev,
1437 1369 "Received interrupt for uncompleted TX frame\n");
1438 if ( eoc ) { 1370
1439 TLAN_DBG( TLAN_DEBUG_TX, 1371 if (eoc) {
1440 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", 1372 TLAN_DBG(TLAN_DEBUG_TX,
1441 priv->txHead, priv->txTail ); 1373 "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
1442 head_list = priv->txList + priv->txHead; 1374 priv->tx_head, priv->tx_tail);
1443 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1375 head_list = priv->tx_list + priv->tx_head;
1444 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1376 head_list_phys = priv->tx_list_dma
1445 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1377 + sizeof(struct tlan_list)*priv->tx_head;
1378 if ((head_list->c_stat & TLAN_CSTAT_READY)
1379 == TLAN_CSTAT_READY) {
1380 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1446 ack |= TLAN_HC_GO; 1381 ack |= TLAN_HC_GO;
1447 } else { 1382 } else {
1448 priv->txInProgress = 0; 1383 priv->tx_in_progress = 0;
1449 } 1384 }
1450 } 1385 }
1451 1386
1452 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1387 if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
1453 TLan_DioWrite8( dev->base_addr, 1388 tlan_dio_write8(dev->base_addr,
1454 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1389 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
1455 if ( priv->timer.function == NULL ) { 1390 if (priv->timer.function == NULL) {
1456 priv->timer.function = TLan_Timer; 1391 priv->timer.function = tlan_timer;
1457 priv->timer.data = (unsigned long) dev; 1392 priv->timer.data = (unsigned long) dev;
1458 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1393 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1459 priv->timerSetAt = jiffies; 1394 priv->timer_set_at = jiffies;
1460 priv->timerType = TLAN_TIMER_ACTIVITY; 1395 priv->timer_type = TLAN_TIMER_ACTIVITY;
1461 add_timer(&priv->timer); 1396 add_timer(&priv->timer);
1462 } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { 1397 } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
1463 priv->timerSetAt = jiffies; 1398 priv->timer_set_at = jiffies;
1464 } 1399 }
1465 } 1400 }
1466 1401
1467 return ack; 1402 return ack;
1468 1403
1469} /* TLan_HandleTxEOF */ 1404}
1470 1405
1471 1406
1472 1407
1473 1408
1474 /*************************************************************** 1409/***************************************************************
1475 * TLan_HandleStatOverflow 1410 * TLan_HandleStatOverflow
1476 * 1411 *
1477 * Returns: 1412 * Returns:
1478 * 1 1413 * 1
1479 * Parms: 1414 * Parms:
1480 * dev Device assigned the IRQ that was 1415 * dev Device assigned the IRQ that was
1481 * raised. 1416 * raised.
1482 * host_int The contents of the HOST_INT 1417 * host_int The contents of the HOST_INT
1483 * port. 1418 * port.
1484 * 1419 *
1485 * This function handles the Statistics Overflow interrupt 1420 * This function handles the Statistics Overflow interrupt
1486 * which means that one or more of the TLAN statistics 1421 * which means that one or more of the TLAN statistics
1487 * registers has reached 1/2 capacity and needs to be read. 1422 * registers has reached 1/2 capacity and needs to be read.
1488 * 1423 *
1489 **************************************************************/ 1424 **************************************************************/
1490 1425
1491static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) 1426static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
1492{ 1427{
1493 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1428 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1494 1429
1495 return 1; 1430 return 1;
1496 1431
1497} /* TLan_HandleStatOverflow */ 1432}
1498 1433
1499 1434
1500 1435
1501 1436
1502 /*************************************************************** 1437/***************************************************************
1503 * TLan_HandleRxEOF 1438 * TLan_HandleRxEOF
1504 * 1439 *
1505 * Returns: 1440 * Returns:
1506 * 1 1441 * 1
1507 * Parms: 1442 * Parms:
1508 * dev Device assigned the IRQ that was 1443 * dev Device assigned the IRQ that was
1509 * raised. 1444 * raised.
1510 * host_int The contents of the HOST_INT 1445 * host_int The contents of the HOST_INT
1511 * port. 1446 * port.
1512 * 1447 *
1513 * This function handles the Rx EOF interrupt which 1448 * This function handles the Rx EOF interrupt which
1514 * indicates a frame has been received by the adapter from 1449 * indicates a frame has been received by the adapter from
1515 * the net and the frame has been transferred to memory. 1450 * the net and the frame has been transferred to memory.
1516 * The function determines the bounce buffer the frame has 1451 * The function determines the bounce buffer the frame has
1517 * been loaded into, creates a new sk_buff big enough to 1452 * been loaded into, creates a new sk_buff big enough to
1518 * hold the frame, and sends it to the protocol stack. It 1453 * hold the frame, and sends it to the protocol stack. It
1519 * then resets the used buffer and appends it to the end 1454 * then resets the used buffer and appends it to the end
1520 * of the list. If the frame was the last in the Rx 1455 * of the list. If the frame was the last in the Rx
1521 * channel (EOC), the function restarts the receive channel 1456 * channel (EOC), the function restarts the receive channel
1522 * by sending an Rx Go command to the adapter. Then it 1457 * by sending an Rx Go command to the adapter. Then it
1523 * activates/continues the activity LED. 1458 * activates/continues the activity LED.
1524 * 1459 *
1525 **************************************************************/ 1460 **************************************************************/
1526 1461
1527static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) 1462static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
1528{ 1463{
1529 TLanPrivateInfo *priv = netdev_priv(dev); 1464 struct tlan_priv *priv = netdev_priv(dev);
1530 u32 ack = 0; 1465 u32 ack = 0;
1531 int eoc = 0; 1466 int eoc = 0;
1532 TLanList *head_list; 1467 struct tlan_list *head_list;
1533 struct sk_buff *skb; 1468 struct sk_buff *skb;
1534 TLanList *tail_list; 1469 struct tlan_list *tail_list;
1535 u16 tmpCStat; 1470 u16 tmp_c_stat;
1536 dma_addr_t head_list_phys; 1471 dma_addr_t head_list_phys;
1537 1472
1538 TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", 1473 TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
1539 priv->rxHead, priv->rxTail ); 1474 priv->rx_head, priv->rx_tail);
1540 head_list = priv->rxList + priv->rxHead; 1475 head_list = priv->rx_list + priv->rx_head;
1541 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1476 head_list_phys =
1477 priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
1542 1478
1543 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1479 while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
1544 dma_addr_t frameDma = head_list->buffer[0].address; 1480 && (ack < 255)) {
1545 u32 frameSize = head_list->frameSize; 1481 dma_addr_t frame_dma = head_list->buffer[0].address;
1482 u32 frame_size = head_list->frame_size;
1546 struct sk_buff *new_skb; 1483 struct sk_buff *new_skb;
1547 1484
1548 ack++; 1485 ack++;
1549 if (tmpCStat & TLAN_CSTAT_EOC) 1486 if (tmp_c_stat & TLAN_CSTAT_EOC)
1550 eoc = 1; 1487 eoc = 1;
1551 1488
1552 new_skb = netdev_alloc_skb_ip_align(dev, 1489 new_skb = netdev_alloc_skb_ip_align(dev,
1553 TLAN_MAX_FRAME_SIZE + 5); 1490 TLAN_MAX_FRAME_SIZE + 5);
1554 if ( !new_skb ) 1491 if (!new_skb)
1555 goto drop_and_reuse; 1492 goto drop_and_reuse;
1556 1493
1557 skb = TLan_GetSKB(head_list); 1494 skb = tlan_get_skb(head_list);
1558 pci_unmap_single(priv->pciDev, frameDma, 1495 pci_unmap_single(priv->pci_dev, frame_dma,
1559 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1496 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1560 skb_put( skb, frameSize ); 1497 skb_put(skb, frame_size);
1561 1498
1562 dev->stats.rx_bytes += frameSize; 1499 dev->stats.rx_bytes += frame_size;
1563 1500
1564 skb->protocol = eth_type_trans( skb, dev ); 1501 skb->protocol = eth_type_trans(skb, dev);
1565 netif_rx( skb ); 1502 netif_rx(skb);
1566 1503
1567 head_list->buffer[0].address = pci_map_single(priv->pciDev, 1504 head_list->buffer[0].address =
1568 new_skb->data, 1505 pci_map_single(priv->pci_dev, new_skb->data,
1569 TLAN_MAX_FRAME_SIZE, 1506 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1570 PCI_DMA_FROMDEVICE);
1571 1507
1572 TLan_StoreSKB(head_list, new_skb); 1508 tlan_store_skb(head_list, new_skb);
1573drop_and_reuse: 1509drop_and_reuse:
1574 head_list->forward = 0; 1510 head_list->forward = 0;
1575 head_list->cStat = 0; 1511 head_list->c_stat = 0;
1576 tail_list = priv->rxList + priv->rxTail; 1512 tail_list = priv->rx_list + priv->rx_tail;
1577 tail_list->forward = head_list_phys; 1513 tail_list->forward = head_list_phys;
1578 1514
1579 CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS ); 1515 CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
1580 CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS ); 1516 CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
1581 head_list = priv->rxList + priv->rxHead; 1517 head_list = priv->rx_list + priv->rx_head;
1582 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1518 head_list_phys = priv->rx_list_dma
1519 + sizeof(struct tlan_list)*priv->rx_head;
1583 } 1520 }
1584 1521
1585 if (!ack) 1522 if (!ack)
1586 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); 1523 netdev_info(dev,
1587 1524 "Received interrupt for uncompleted RX frame\n");
1588 1525
1589 if ( eoc ) { 1526
1590 TLAN_DBG( TLAN_DEBUG_RX, 1527 if (eoc) {
1591 "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", 1528 TLAN_DBG(TLAN_DEBUG_RX,
1592 priv->rxHead, priv->rxTail ); 1529 "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
1593 head_list = priv->rxList + priv->rxHead; 1530 priv->rx_head, priv->rx_tail);
1594 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1531 head_list = priv->rx_list + priv->rx_head;
1595 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1532 head_list_phys = priv->rx_list_dma
1533 + sizeof(struct tlan_list)*priv->rx_head;
1534 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1596 ack |= TLAN_HC_GO | TLAN_HC_RT; 1535 ack |= TLAN_HC_GO | TLAN_HC_RT;
1597 priv->rxEocCount++; 1536 priv->rx_eoc_count++;
1598 } 1537 }
1599 1538
1600 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1539 if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
1601 TLan_DioWrite8( dev->base_addr, 1540 tlan_dio_write8(dev->base_addr,
1602 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1541 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
1603 if ( priv->timer.function == NULL ) { 1542 if (priv->timer.function == NULL) {
1604 priv->timer.function = TLan_Timer; 1543 priv->timer.function = tlan_timer;
1605 priv->timer.data = (unsigned long) dev; 1544 priv->timer.data = (unsigned long) dev;
1606 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1545 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1607 priv->timerSetAt = jiffies; 1546 priv->timer_set_at = jiffies;
1608 priv->timerType = TLAN_TIMER_ACTIVITY; 1547 priv->timer_type = TLAN_TIMER_ACTIVITY;
1609 add_timer(&priv->timer); 1548 add_timer(&priv->timer);
1610 } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { 1549 } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
1611 priv->timerSetAt = jiffies; 1550 priv->timer_set_at = jiffies;
1612 } 1551 }
1613 } 1552 }
1614 1553
1615 return ack; 1554 return ack;
1616 1555
1617} /* TLan_HandleRxEOF */ 1556}
1618 1557
1619 1558
1620 1559
1621 1560
1622 /*************************************************************** 1561/***************************************************************
1623 * TLan_HandleDummy 1562 * tlan_handle_dummy
1624 * 1563 *
1625 * Returns: 1564 * Returns:
1626 * 1 1565 * 1
1627 * Parms: 1566 * Parms:
1628 * dev Device assigned the IRQ that was 1567 * dev Device assigned the IRQ that was
1629 * raised. 1568 * raised.
1630 * host_int The contents of the HOST_INT 1569 * host_int The contents of the HOST_INT
1631 * port. 1570 * port.
1632 * 1571 *
1633 * This function handles the Dummy interrupt, which is 1572 * This function handles the Dummy interrupt, which is
1634 * raised whenever a test interrupt is generated by setting 1573 * raised whenever a test interrupt is generated by setting
1635 * the Req_Int bit of HOST_CMD to 1. 1574 * the Req_Int bit of HOST_CMD to 1.
1636 * 1575 *
1637 **************************************************************/ 1576 **************************************************************/
1638 1577
1639static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) 1578static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
1640{ 1579{
1641 printk( "TLAN: Test interrupt on %s.\n", dev->name ); 1580 netdev_info(dev, "Test interrupt\n");
1642 return 1; 1581 return 1;
1643 1582
1644} /* TLan_HandleDummy */ 1583}
1645 1584
1646 1585
1647 1586
1648 1587
1649 /*************************************************************** 1588/***************************************************************
1650 * TLan_HandleTxEOC 1589 * tlan_handle_tx_eoc
1651 * 1590 *
1652 * Returns: 1591 * Returns:
1653 * 1 1592 * 1
1654 * Parms: 1593 * Parms:
1655 * dev Device assigned the IRQ that was 1594 * dev Device assigned the IRQ that was
1656 * raised. 1595 * raised.
1657 * host_int The contents of the HOST_INT 1596 * host_int The contents of the HOST_INT
1658 * port. 1597 * port.
1659 * 1598 *
1660 * This driver is structured to determine EOC occurrences by 1599 * This driver is structured to determine EOC occurrences by
1661 * reading the CSTAT member of the list structure. Tx EOC 1600 * reading the CSTAT member of the list structure. Tx EOC
1662 * interrupts are disabled via the DIO INTDIS register. 1601 * interrupts are disabled via the DIO INTDIS register.
1663 * However, TLAN chips before revision 3.0 didn't have this 1602 * However, TLAN chips before revision 3.0 didn't have this
1664 * functionality, so process EOC events if this is the 1603 * functionality, so process EOC events if this is the
1665 * case. 1604 * case.
1666 * 1605 *
1667 **************************************************************/ 1606 **************************************************************/
1668 1607
1669static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) 1608static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
1670{ 1609{
1671 TLanPrivateInfo *priv = netdev_priv(dev); 1610 struct tlan_priv *priv = netdev_priv(dev);
1672 TLanList *head_list; 1611 struct tlan_list *head_list;
1673 dma_addr_t head_list_phys; 1612 dma_addr_t head_list_phys;
1674 u32 ack = 1; 1613 u32 ack = 1;
1675 1614
1676 host_int = 0; 1615 host_int = 0;
1677 if ( priv->tlanRev < 0x30 ) { 1616 if (priv->tlan_rev < 0x30) {
1678 TLAN_DBG( TLAN_DEBUG_TX, 1617 TLAN_DBG(TLAN_DEBUG_TX,
1679 "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", 1618 "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
1680 priv->txHead, priv->txTail ); 1619 priv->tx_head, priv->tx_tail);
1681 head_list = priv->txList + priv->txHead; 1620 head_list = priv->tx_list + priv->tx_head;
1682 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1621 head_list_phys = priv->tx_list_dma
1683 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { 1622 + sizeof(struct tlan_list)*priv->tx_head;
1623 if ((head_list->c_stat & TLAN_CSTAT_READY)
1624 == TLAN_CSTAT_READY) {
1684 netif_stop_queue(dev); 1625 netif_stop_queue(dev);
1685 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1626 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1686 ack |= TLAN_HC_GO; 1627 ack |= TLAN_HC_GO;
1687 } else { 1628 } else {
1688 priv->txInProgress = 0; 1629 priv->tx_in_progress = 0;
1689 } 1630 }
1690 } 1631 }
1691 1632
1692 return ack; 1633 return ack;
1693 1634
1694} /* TLan_HandleTxEOC */ 1635}
1695 1636
1696 1637
1697 1638
1698 1639
1699 /*************************************************************** 1640/***************************************************************
1700 * TLan_HandleStatusCheck 1641 * tlan_handle_status_check
1701 * 1642 *
1702 * Returns: 1643 * Returns:
1703 * 0 if Adapter check, 1 if Network Status check. 1644 * 0 if Adapter check, 1 if Network Status check.
1704 * Parms: 1645 * Parms:
1705 * dev Device assigned the IRQ that was 1646 * dev Device assigned the IRQ that was
1706 * raised. 1647 * raised.
1707 * host_int The contents of the HOST_INT 1648 * host_int The contents of the HOST_INT
1708 * port. 1649 * port.
1709 * 1650 *
1710 * This function handles Adapter Check/Network Status 1651 * This function handles Adapter Check/Network Status
1711 * interrupts generated by the adapter. It checks the 1652 * interrupts generated by the adapter. It checks the
1712 * vector in the HOST_INT register to determine if it is 1653 * vector in the HOST_INT register to determine if it is
1713 * an Adapter Check interrupt. If so, it resets the 1654 * an Adapter Check interrupt. If so, it resets the
1714 * adapter. Otherwise it clears the status registers 1655 * adapter. Otherwise it clears the status registers
1715 * and services the PHY. 1656 * and services the PHY.
1716 * 1657 *
1717 **************************************************************/ 1658 **************************************************************/
1718 1659
1719static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) 1660static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
1720{ 1661{
1721 TLanPrivateInfo *priv = netdev_priv(dev); 1662 struct tlan_priv *priv = netdev_priv(dev);
1722 u32 ack; 1663 u32 ack;
1723 u32 error; 1664 u32 error;
1724 u8 net_sts; 1665 u8 net_sts;
@@ -1727,92 +1668,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
1727 u16 tlphy_sts; 1668 u16 tlphy_sts;
1728 1669
1729 ack = 1; 1670 ack = 1;
1730 if ( host_int & TLAN_HI_IV_MASK ) { 1671 if (host_int & TLAN_HI_IV_MASK) {
1731 netif_stop_queue( dev ); 1672 netif_stop_queue(dev);
1732 error = inl( dev->base_addr + TLAN_CH_PARM ); 1673 error = inl(dev->base_addr + TLAN_CH_PARM);
1733 printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error ); 1674 netdev_info(dev, "Adaptor Error = 0x%x\n", error);
1734 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1675 tlan_read_and_clear_stats(dev, TLAN_RECORD);
1735 outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); 1676 outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
1736 1677
1737 schedule_work(&priv->tlan_tqueue); 1678 schedule_work(&priv->tlan_tqueue);
1738 1679
1739 netif_wake_queue(dev); 1680 netif_wake_queue(dev);
1740 ack = 0; 1681 ack = 0;
1741 } else { 1682 } else {
1742 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name ); 1683 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
1743 phy = priv->phy[priv->phyNum]; 1684 phy = priv->phy[priv->phy_num];
1744 1685
1745 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); 1686 net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
1746 if ( net_sts ) { 1687 if (net_sts) {
1747 TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); 1688 tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
1748 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", 1689 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
1749 dev->name, (unsigned) net_sts ); 1690 dev->name, (unsigned) net_sts);
1750 } 1691 }
1751 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { 1692 if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
1752 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); 1693 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
1753 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 1694 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
1754 if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && 1695 if (!(tlphy_sts & TLAN_TS_POLOK) &&
1755 ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1696 !(tlphy_ctl & TLAN_TC_SWAPOL)) {
1756 tlphy_ctl |= TLAN_TC_SWAPOL; 1697 tlphy_ctl |= TLAN_TC_SWAPOL;
1757 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1698 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
1758 } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && 1699 tlphy_ctl);
1759 ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1700 } else if ((tlphy_sts & TLAN_TS_POLOK) &&
1760 tlphy_ctl &= ~TLAN_TC_SWAPOL; 1701 (tlphy_ctl & TLAN_TC_SWAPOL)) {
1761 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1702 tlphy_ctl &= ~TLAN_TC_SWAPOL;
1762 } 1703 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
1763 1704 tlphy_ctl);
1764 if (debug) {
1765 TLan_PhyPrint( dev );
1766 } 1705 }
1706
1707 if (debug)
1708 tlan_phy_print(dev);
1767 } 1709 }
1768 } 1710 }
1769 1711
1770 return ack; 1712 return ack;
1771 1713
1772} /* TLan_HandleStatusCheck */ 1714}
1773 1715
1774 1716
1775 1717
1776 1718
1777 /*************************************************************** 1719/***************************************************************
1778 * TLan_HandleRxEOC 1720 * tlan_handle_rx_eoc
1779 * 1721 *
1780 * Returns: 1722 * Returns:
1781 * 1 1723 * 1
1782 * Parms: 1724 * Parms:
1783 * dev Device assigned the IRQ that was 1725 * dev Device assigned the IRQ that was
1784 * raised. 1726 * raised.
1785 * host_int The contents of the HOST_INT 1727 * host_int The contents of the HOST_INT
1786 * port. 1728 * port.
1787 * 1729 *
1788 * This driver is structured to determine EOC occurrences by 1730 * This driver is structured to determine EOC occurrences by
1789 * reading the CSTAT member of the list structure. Rx EOC 1731 * reading the CSTAT member of the list structure. Rx EOC
1790 * interrupts are disabled via the DIO INTDIS register. 1732 * interrupts are disabled via the DIO INTDIS register.
1791 * However, TLAN chips before revision 3.0 didn't have this 1733 * However, TLAN chips before revision 3.0 didn't have this
1792 * CSTAT member or an INTDIS register, so if this chip is 1734 * CSTAT member or an INTDIS register, so if this chip is
1793 * pre-3.0, process EOC interrupts normally. 1735 * pre-3.0, process EOC interrupts normally.
1794 * 1736 *
1795 **************************************************************/ 1737 **************************************************************/
1796 1738
1797static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) 1739static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
1798{ 1740{
1799 TLanPrivateInfo *priv = netdev_priv(dev); 1741 struct tlan_priv *priv = netdev_priv(dev);
1800 dma_addr_t head_list_phys; 1742 dma_addr_t head_list_phys;
1801 u32 ack = 1; 1743 u32 ack = 1;
1802 1744
1803 if ( priv->tlanRev < 0x30 ) { 1745 if (priv->tlan_rev < 0x30) {
1804 TLAN_DBG( TLAN_DEBUG_RX, 1746 TLAN_DBG(TLAN_DEBUG_RX,
1805 "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", 1747 "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
1806 priv->rxHead, priv->rxTail ); 1748 priv->rx_head, priv->rx_tail);
1807 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1749 head_list_phys = priv->rx_list_dma
1808 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1750 + sizeof(struct tlan_list)*priv->rx_head;
1751 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
1809 ack |= TLAN_HC_GO | TLAN_HC_RT; 1752 ack |= TLAN_HC_GO | TLAN_HC_RT;
1810 priv->rxEocCount++; 1753 priv->rx_eoc_count++;
1811 } 1754 }
1812 1755
1813 return ack; 1756 return ack;
1814 1757
1815} /* TLan_HandleRxEOC */ 1758}
1816 1759
1817 1760
1818 1761
@@ -1820,98 +1763,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
1820/***************************************************************************** 1763/*****************************************************************************
1821****************************************************************************** 1764******************************************************************************
1822 1765
1823 ThunderLAN Driver Timer Function 1766ThunderLAN driver timer function
1824 1767
1825****************************************************************************** 1768******************************************************************************
1826*****************************************************************************/ 1769*****************************************************************************/
1827 1770
1828 1771
1829 /*************************************************************** 1772/***************************************************************
1830 * TLan_Timer 1773 * tlan_timer
1831 * 1774 *
1832 * Returns: 1775 * Returns:
1833 * Nothing 1776 * Nothing
1834 * Parms: 1777 * Parms:
1835 * data A value given to add timer when 1778 * data A value given to add timer when
1836 * add_timer was called. 1779 * add_timer was called.
1837 * 1780 *
1838 * This function handles timed functionality for the 1781 * This function handles timed functionality for the
1839 * TLAN driver. The two current timer uses are for 1782 * TLAN driver. The two current timer uses are for
1840 * delaying for autonegotiation and driving the ACT LED. 1783 * delaying for autonegotiation and driving the ACT LED.
1841 * - Autonegotiation requires being allowed about 1784 * - Autonegotiation requires being allowed about
1842 * 2 1/2 seconds before attempting to transmit a 1785 * 2 1/2 seconds before attempting to transmit a
1843 * packet. It would be a very bad thing to hang 1786 * packet. It would be a very bad thing to hang
1844 * the kernel this long, so the driver doesn't 1787 * the kernel this long, so the driver doesn't
1845 * allow transmission 'til after this time, for 1788 * allow transmission 'til after this time, for
1846 * certain PHYs. It would be much nicer if all 1789 * certain PHYs. It would be much nicer if all
1847 * PHYs were interrupt-capable like the internal 1790 * PHYs were interrupt-capable like the internal
1848 * PHY. 1791 * PHY.
1849 * - The ACT LED, which shows adapter activity, is 1792 * - The ACT LED, which shows adapter activity, is
1850 * driven by the driver, and so must be left on 1793 * driven by the driver, and so must be left on
1851 * for a short period to power up the LED so it 1794 * for a short period to power up the LED so it
1852 * can be seen. This delay can be changed by 1795 * can be seen. This delay can be changed by
1853 * changing the TLAN_TIMER_ACT_DELAY in tlan.h, 1796 * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
1854 * if desired. 100 ms produces a slightly 1797 * if desired. 100 ms produces a slightly
1855 * sluggish response. 1798 * sluggish response.
1856 * 1799 *
1857 **************************************************************/ 1800 **************************************************************/
1858 1801
1859static void TLan_Timer( unsigned long data ) 1802static void tlan_timer(unsigned long data)
1860{ 1803{
1861 struct net_device *dev = (struct net_device *) data; 1804 struct net_device *dev = (struct net_device *) data;
1862 TLanPrivateInfo *priv = netdev_priv(dev); 1805 struct tlan_priv *priv = netdev_priv(dev);
1863 u32 elapsed; 1806 u32 elapsed;
1864 unsigned long flags = 0; 1807 unsigned long flags = 0;
1865 1808
1866 priv->timer.function = NULL; 1809 priv->timer.function = NULL;
1867 1810
1868 switch ( priv->timerType ) { 1811 switch (priv->timer_type) {
1869#ifdef MONITOR 1812#ifdef MONITOR
1870 case TLAN_TIMER_LINK_BEAT: 1813 case TLAN_TIMER_LINK_BEAT:
1871 TLan_PhyMonitor( dev ); 1814 tlan_phy_monitor(dev);
1872 break; 1815 break;
1873#endif 1816#endif
1874 case TLAN_TIMER_PHY_PDOWN: 1817 case TLAN_TIMER_PHY_PDOWN:
1875 TLan_PhyPowerDown( dev ); 1818 tlan_phy_power_down(dev);
1876 break; 1819 break;
1877 case TLAN_TIMER_PHY_PUP: 1820 case TLAN_TIMER_PHY_PUP:
1878 TLan_PhyPowerUp( dev ); 1821 tlan_phy_power_up(dev);
1879 break; 1822 break;
1880 case TLAN_TIMER_PHY_RESET: 1823 case TLAN_TIMER_PHY_RESET:
1881 TLan_PhyReset( dev ); 1824 tlan_phy_reset(dev);
1882 break; 1825 break;
1883 case TLAN_TIMER_PHY_START_LINK: 1826 case TLAN_TIMER_PHY_START_LINK:
1884 TLan_PhyStartLink( dev ); 1827 tlan_phy_start_link(dev);
1885 break; 1828 break;
1886 case TLAN_TIMER_PHY_FINISH_AN: 1829 case TLAN_TIMER_PHY_FINISH_AN:
1887 TLan_PhyFinishAutoNeg( dev ); 1830 tlan_phy_finish_auto_neg(dev);
1888 break; 1831 break;
1889 case TLAN_TIMER_FINISH_RESET: 1832 case TLAN_TIMER_FINISH_RESET:
1890 TLan_FinishReset( dev ); 1833 tlan_finish_reset(dev);
1891 break; 1834 break;
1892 case TLAN_TIMER_ACTIVITY: 1835 case TLAN_TIMER_ACTIVITY:
1893 spin_lock_irqsave(&priv->lock, flags); 1836 spin_lock_irqsave(&priv->lock, flags);
1894 if ( priv->timer.function == NULL ) { 1837 if (priv->timer.function == NULL) {
1895 elapsed = jiffies - priv->timerSetAt; 1838 elapsed = jiffies - priv->timer_set_at;
1896 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { 1839 if (elapsed >= TLAN_TIMER_ACT_DELAY) {
1897 TLan_DioWrite8( dev->base_addr, 1840 tlan_dio_write8(dev->base_addr,
1898 TLAN_LED_REG, TLAN_LED_LINK ); 1841 TLAN_LED_REG, TLAN_LED_LINK);
1899 } else { 1842 } else {
1900 priv->timer.function = TLan_Timer; 1843 priv->timer.function = tlan_timer;
1901 priv->timer.expires = priv->timerSetAt 1844 priv->timer.expires = priv->timer_set_at
1902 + TLAN_TIMER_ACT_DELAY; 1845 + TLAN_TIMER_ACT_DELAY;
1903 spin_unlock_irqrestore(&priv->lock, flags); 1846 spin_unlock_irqrestore(&priv->lock, flags);
1904 add_timer( &priv->timer ); 1847 add_timer(&priv->timer);
1905 break; 1848 break;
1906 }
1907 } 1849 }
1908 spin_unlock_irqrestore(&priv->lock, flags); 1850 }
1909 break; 1851 spin_unlock_irqrestore(&priv->lock, flags);
1910 default: 1852 break;
1911 break; 1853 default:
1854 break;
1912 } 1855 }
1913 1856
1914} /* TLan_Timer */ 1857}
1915 1858
1916 1859
1917 1860
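The timer cases above are armed with tlan_set_timer(dev, ticks, type), which the rest of this patch calls with a jiffies delay and one of the TLAN_TIMER_* types. A sketch of such a helper, assuming the timer_type/timer_set_at fields used above (not a verbatim copy of the driver's version), might look like:

static void tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	/* interrupt-context callers already hold priv->lock */
	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		/* a non-activity timer is already pending; leave it alone */
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);

	priv->timer.data = (unsigned long)dev;
	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	mod_timer(&priv->timer, jiffies + ticks);
}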
@@ -1919,39 +1862,39 @@ static void TLan_Timer( unsigned long data )
1919/***************************************************************************** 1862/*****************************************************************************
1920****************************************************************************** 1863******************************************************************************
1921 1864
1922 ThunderLAN Driver Adapter Related Routines 1865ThunderLAN driver adapter related routines
1923 1866
1924****************************************************************************** 1867******************************************************************************
1925*****************************************************************************/ 1868*****************************************************************************/
1926 1869
1927 1870
1928 /*************************************************************** 1871/***************************************************************
1929 * TLan_ResetLists 1872 * tlan_reset_lists
1930 * 1873 *
1931 * Returns: 1874 * Returns:
1932 * Nothing 1875 * Nothing
1933 * Parms: 1876 * Parms:
1934 * dev The device structure with the list 1877 * dev The device structure with the list
1935 * structures to be reset. 1878 * structures to be reset.
1936 * 1879 *
1937 * This routine sets the variables associated with managing 1880 * This routine sets the variables associated with managing
1938 * the TLAN lists to their initial values. 1881 * the TLAN lists to their initial values.
1939 * 1882 *
1940 **************************************************************/ 1883 **************************************************************/
1941 1884
1942static void TLan_ResetLists( struct net_device *dev ) 1885static void tlan_reset_lists(struct net_device *dev)
1943{ 1886{
1944 TLanPrivateInfo *priv = netdev_priv(dev); 1887 struct tlan_priv *priv = netdev_priv(dev);
1945 int i; 1888 int i;
1946 TLanList *list; 1889 struct tlan_list *list;
1947 dma_addr_t list_phys; 1890 dma_addr_t list_phys;
1948 struct sk_buff *skb; 1891 struct sk_buff *skb;
1949 1892
1950 priv->txHead = 0; 1893 priv->tx_head = 0;
1951 priv->txTail = 0; 1894 priv->tx_tail = 0;
1952 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 1895 for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
1953 list = priv->txList + i; 1896 list = priv->tx_list + i;
1954 list->cStat = TLAN_CSTAT_UNUSED; 1897 list->c_stat = TLAN_CSTAT_UNUSED;
1955 list->buffer[0].address = 0; 1898 list->buffer[0].address = 0;
1956 list->buffer[2].count = 0; 1899 list->buffer[2].count = 0;
1957 list->buffer[2].address = 0; 1900 list->buffer[2].address = 0;
@@ -1959,169 +1902,169 @@ static void TLan_ResetLists( struct net_device *dev )
1959 list->buffer[9].address = 0; 1902 list->buffer[9].address = 0;
1960 } 1903 }
1961 1904
1962 priv->rxHead = 0; 1905 priv->rx_head = 0;
1963 priv->rxTail = TLAN_NUM_RX_LISTS - 1; 1906 priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
1964 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { 1907 for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
1965 list = priv->rxList + i; 1908 list = priv->rx_list + i;
1966 list_phys = priv->rxListDMA + sizeof(TLanList) * i; 1909 list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
1967 list->cStat = TLAN_CSTAT_READY; 1910 list->c_stat = TLAN_CSTAT_READY;
1968 list->frameSize = TLAN_MAX_FRAME_SIZE; 1911 list->frame_size = TLAN_MAX_FRAME_SIZE;
1969 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1912 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
1970 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); 1913 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
1971 if ( !skb ) { 1914 if (!skb) {
1972 pr_err("TLAN: out of memory for received data.\n" ); 1915 netdev_err(dev, "Out of memory for received data\n");
1973 break; 1916 break;
1974 } 1917 }
1975 1918
1976 list->buffer[0].address = pci_map_single(priv->pciDev, 1919 list->buffer[0].address = pci_map_single(priv->pci_dev,
1977 skb->data, 1920 skb->data,
1978 TLAN_MAX_FRAME_SIZE, 1921 TLAN_MAX_FRAME_SIZE,
1979 PCI_DMA_FROMDEVICE); 1922 PCI_DMA_FROMDEVICE);
1980 TLan_StoreSKB(list, skb); 1923 tlan_store_skb(list, skb);
1981 list->buffer[1].count = 0; 1924 list->buffer[1].count = 0;
1982 list->buffer[1].address = 0; 1925 list->buffer[1].address = 0;
1983 list->forward = list_phys + sizeof(TLanList); 1926 list->forward = list_phys + sizeof(struct tlan_list);
1984 } 1927 }
1985 1928
1986 /* in case ran out of memory early, clear bits */ 1929 /* in case ran out of memory early, clear bits */
1987 while (i < TLAN_NUM_RX_LISTS) { 1930 while (i < TLAN_NUM_RX_LISTS) {
1988 TLan_StoreSKB(priv->rxList + i, NULL); 1931 tlan_store_skb(priv->rx_list + i, NULL);
1989 ++i; 1932 ++i;
1990 } 1933 }
1991 list->forward = 0; 1934 list->forward = 0;
1992 1935
1993} /* TLan_ResetLists */ 1936}
1994 1937
1995 1938
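The rx/tx paths above stash each descriptor's sk_buff pointer in the otherwise unused buffer[8]/buffer[9] slots, which is why those addresses are cleared when a buffer is freed. Helpers consistent with that convention (a sketch, assuming the pointer is split across the two 32-bit address fields so it also fits on 64-bit hosts) could look like:

static void tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;

	/* low 32 bits in buffer[9], high bits (if any) in buffer[8] */
	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits(addr);
}

static struct sk_buff *tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr = tag->buffer[9].address;

	addr |= ((unsigned long)tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *)addr;
}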
1996static void TLan_FreeLists( struct net_device *dev ) 1939static void tlan_free_lists(struct net_device *dev)
1997{ 1940{
1998 TLanPrivateInfo *priv = netdev_priv(dev); 1941 struct tlan_priv *priv = netdev_priv(dev);
1999 int i; 1942 int i;
2000 TLanList *list; 1943 struct tlan_list *list;
2001 struct sk_buff *skb; 1944 struct sk_buff *skb;
2002 1945
2003 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 1946 for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
2004 list = priv->txList + i; 1947 list = priv->tx_list + i;
2005 skb = TLan_GetSKB(list); 1948 skb = tlan_get_skb(list);
2006 if ( skb ) { 1949 if (skb) {
2007 pci_unmap_single( 1950 pci_unmap_single(
2008 priv->pciDev, 1951 priv->pci_dev,
2009 list->buffer[0].address, 1952 list->buffer[0].address,
2010 max(skb->len, 1953 max(skb->len,
2011 (unsigned int)TLAN_MIN_FRAME_SIZE), 1954 (unsigned int)TLAN_MIN_FRAME_SIZE),
2012 PCI_DMA_TODEVICE); 1955 PCI_DMA_TODEVICE);
2013 dev_kfree_skb_any( skb ); 1956 dev_kfree_skb_any(skb);
2014 list->buffer[8].address = 0; 1957 list->buffer[8].address = 0;
2015 list->buffer[9].address = 0; 1958 list->buffer[9].address = 0;
2016 } 1959 }
2017 } 1960 }
2018 1961
2019 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { 1962 for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
2020 list = priv->rxList + i; 1963 list = priv->rx_list + i;
2021 skb = TLan_GetSKB(list); 1964 skb = tlan_get_skb(list);
2022 if ( skb ) { 1965 if (skb) {
2023 pci_unmap_single(priv->pciDev, 1966 pci_unmap_single(priv->pci_dev,
2024 list->buffer[0].address, 1967 list->buffer[0].address,
2025 TLAN_MAX_FRAME_SIZE, 1968 TLAN_MAX_FRAME_SIZE,
2026 PCI_DMA_FROMDEVICE); 1969 PCI_DMA_FROMDEVICE);
2027 dev_kfree_skb_any( skb ); 1970 dev_kfree_skb_any(skb);
2028 list->buffer[8].address = 0; 1971 list->buffer[8].address = 0;
2029 list->buffer[9].address = 0; 1972 list->buffer[9].address = 0;
2030 } 1973 }
2031 } 1974 }
2032} /* TLan_FreeLists */ 1975}
2033 1976
2034 1977
2035 1978
2036 1979
2037 /*************************************************************** 1980/***************************************************************
2038 * TLan_PrintDio 1981 * tlan_print_dio
2039 * 1982 *
2040 * Returns: 1983 * Returns:
2041 * Nothing 1984 * Nothing
2042 * Parms: 1985 * Parms:
2043 * io_base Base IO port of the device of 1986 * io_base Base IO port of the device of
2044 * which to print DIO registers. 1987 * which to print DIO registers.
2045 * 1988 *
2046 * This function prints out all the internal (DIO) 1989 * This function prints out all the internal (DIO)
2047 * registers of a TLAN chip. 1990 * registers of a TLAN chip.
2048 * 1991 *
2049 **************************************************************/ 1992 **************************************************************/
2050 1993
2051static void TLan_PrintDio( u16 io_base ) 1994static void tlan_print_dio(u16 io_base)
2052{ 1995{
2053 u32 data0, data1; 1996 u32 data0, data1;
2054 int i; 1997 int i;
2055 1998
2056 printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", 1999 pr_info("Contents of internal registers for io base 0x%04hx\n",
2057 io_base ); 2000 io_base);
2058 printk( "TLAN: Off. +0 +4\n" ); 2001 pr_info("Off. +0 +4\n");
2059 for ( i = 0; i < 0x4C; i+= 8 ) { 2002 for (i = 0; i < 0x4C; i += 8) {
2060 data0 = TLan_DioRead32( io_base, i ); 2003 data0 = tlan_dio_read32(io_base, i);
2061 data1 = TLan_DioRead32( io_base, i + 0x4 ); 2004 data1 = tlan_dio_read32(io_base, i + 0x4);
2062 printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 ); 2005 pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
2063 } 2006 }
2064 2007
2065} /* TLan_PrintDio */ 2008}
2066 2009
2067 2010
2068 2011
2069 2012
2070 /*************************************************************** 2013/***************************************************************
2071 * TLan_PrintList 2014 * tlan_print_list
2072 * 2015 *
2073 * Returns: 2016 * Returns:
2074 * Nothing 2017 * Nothing
2075 * Parms: 2018 * Parms:
2076 * list A pointer to the TLanList structure to 2019 * list A pointer to the struct tlan_list structure to
2077 * be printed. 2020 * be printed.
2078 * type A string to designate type of list, 2021 * type A string to designate type of list,
2079 * "Rx" or "Tx". 2022 * "Rx" or "Tx".
2080 * num The index of the list. 2023 * num The index of the list.
2081 * 2024 *
2082 * This function prints out the contents of the list 2025 * This function prints out the contents of the list
2083 * pointed to by the list parameter. 2026 * pointed to by the list parameter.
2084 * 2027 *
2085 **************************************************************/ 2028 **************************************************************/
2086 2029
2087static void TLan_PrintList( TLanList *list, char *type, int num) 2030static void tlan_print_list(struct tlan_list *list, char *type, int num)
2088{ 2031{
2089 int i; 2032 int i;
2090 2033
2091 printk( "TLAN: %s List %d at %p\n", type, num, list ); 2034 pr_info("%s List %d at %p\n", type, num, list);
2092 printk( "TLAN: Forward = 0x%08x\n", list->forward ); 2035 pr_info(" Forward = 0x%08x\n", list->forward);
2093 printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat ); 2036 pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
2094 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); 2037 pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
2095 /* for ( i = 0; i < 10; i++ ) { */ 2038 /* for (i = 0; i < 10; i++) { */
2096 for ( i = 0; i < 2; i++ ) { 2039 for (i = 0; i < 2; i++) {
2097 printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", 2040 pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
2098 i, list->buffer[i].count, list->buffer[i].address ); 2041 i, list->buffer[i].count, list->buffer[i].address);
2099 } 2042 }
2100 2043
2101} /* TLan_PrintList */ 2044}
2102 2045
2103 2046
2104 2047
2105 2048
2106 /*************************************************************** 2049/***************************************************************
2107 * TLan_ReadAndClearStats 2050 * tlan_read_and_clear_stats
2108 * 2051 *
2109 * Returns: 2052 * Returns:
2110 * Nothing 2053 * Nothing
2111 * Parms: 2054 * Parms:
2112 * dev Pointer to device structure of adapter 2055 * dev Pointer to device structure of adapter
2113 * from which to read stats. 2056 * from which to read stats.
2114 * record Flag indicating whether to add the values to the device statistics. 2057 * record Flag indicating whether to add the values to the device statistics.
2115 * 2058 *
2116 * This function reads all the internal status registers 2059 * This function reads all the internal status registers
2117 * of the TLAN chip, which clears them as a side effect. 2060 * of the TLAN chip, which clears them as a side effect.
2118 * It then either adds the values to the device's status 2061 * It then either adds the values to the device's status
2119 * struct, or discards them, depending on whether record 2062 * struct, or discards them, depending on whether record
2120 * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). 2063 * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
2121 * 2064 *
2122 **************************************************************/ 2065 **************************************************************/
2123 2066
2124static void TLan_ReadAndClearStats( struct net_device *dev, int record ) 2067static void tlan_read_and_clear_stats(struct net_device *dev, int record)
2125{ 2068{
2126 u32 tx_good, tx_under; 2069 u32 tx_good, tx_under;
2127 u32 rx_good, rx_over; 2070 u32 rx_good, rx_over;
@@ -2129,41 +2072,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2129 u32 multi_col, single_col; 2072 u32 multi_col, single_col;
2130 u32 excess_col, late_col, loss; 2073 u32 excess_col, late_col, loss;
2131 2074
2132 outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2075 outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
2133 tx_good = inb( dev->base_addr + TLAN_DIO_DATA ); 2076 tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
2134 tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2077 tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2135 tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; 2078 tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
2136 tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2079 tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2137 2080
2138 outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2081 outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
2139 rx_good = inb( dev->base_addr + TLAN_DIO_DATA ); 2082 rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
2140 rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2083 rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2141 rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; 2084 rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
2142 rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2085 rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2143 2086
2144 outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR ); 2087 outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
2145 def_tx = inb( dev->base_addr + TLAN_DIO_DATA ); 2088 def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
2146 def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2089 def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2147 crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2090 crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2148 code = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); 2091 code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
2149 2092
2150 outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2093 outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
2151 multi_col = inb( dev->base_addr + TLAN_DIO_DATA ); 2094 multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
2152 multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; 2095 multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
2153 single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2096 single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2154 single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8; 2097 single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
2155 2098
2156 outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); 2099 outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
2157 excess_col = inb( dev->base_addr + TLAN_DIO_DATA ); 2100 excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
2158 late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 ); 2101 late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
2159 loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); 2102 loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
2160 2103
2161 if ( record ) { 2104 if (record) {
2162 dev->stats.rx_packets += rx_good; 2105 dev->stats.rx_packets += rx_good;
2163 dev->stats.rx_errors += rx_over + crc + code; 2106 dev->stats.rx_errors += rx_over + crc + code;
2164 dev->stats.tx_packets += tx_good; 2107 dev->stats.tx_packets += tx_good;
2165 dev->stats.tx_errors += tx_under + loss; 2108 dev->stats.tx_errors += tx_under + loss;
2166 dev->stats.collisions += multi_col + single_col + excess_col + late_col; 2109 dev->stats.collisions += multi_col
2110 + single_col + excess_col + late_col;
2167 2111
2168 dev->stats.rx_over_errors += rx_over; 2112 dev->stats.rx_over_errors += rx_over;
2169 dev->stats.rx_crc_errors += crc; 2113 dev->stats.rx_crc_errors += crc;
@@ -2173,39 +2117,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
2173 dev->stats.tx_carrier_errors += loss; 2117 dev->stats.tx_carrier_errors += loss;
2174 } 2118 }
2175 2119
2176} /* TLan_ReadAndClearStats */ 2120}
2177 2121
2178 2122
2179 2123
2180 2124
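Since reading these DIO counters clears them, every reader has to fold the values into dev->stats; the driver's get_stats hook is then just a thin wrapper around this routine. A sketch (the hook name tlan_get_stats is assumed here) would be:

static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
	/* the statistics registers clear on read, so accumulate them
	 * into dev->stats before reporting
	 */
	tlan_read_and_clear_stats(dev, TLAN_RECORD);

	return &dev->stats;
}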
2181 /*************************************************************** 2125/***************************************************************
2182 * TLan_Reset 2126 * tlan_reset_adapter
2183 * 2127 *
2184 * Returns: 2128 * Returns:
2185 * 0 2129 * 0
2186 * Parms: 2130 * Parms:
2187 * dev Pointer to device structure of adapter 2131 * dev Pointer to device structure of adapter
2188 * to be reset. 2132 * to be reset.
2189 * 2133 *
2190 * This function resets the adapter and its physical 2134 * This function resets the adapter and its physical
2191 * device. See Chap. 3, pp. 9-10 of the "ThunderLAN 2135 * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
2192 * Programmer's Guide" for details. The routine tries to 2136 * Programmer's Guide" for details. The routine tries to
2193 * implement what is detailed there, though adjustments 2137 * implement what is detailed there, though adjustments
2194 * have been made. 2138 * have been made.
2195 * 2139 *
2196 **************************************************************/ 2140 **************************************************************/
2197 2141
2198static void 2142static void
2199TLan_ResetAdapter( struct net_device *dev ) 2143tlan_reset_adapter(struct net_device *dev)
2200{ 2144{
2201 TLanPrivateInfo *priv = netdev_priv(dev); 2145 struct tlan_priv *priv = netdev_priv(dev);
2202 int i; 2146 int i;
2203 u32 addr; 2147 u32 addr;
2204 u32 data; 2148 u32 data;
2205 u8 data8; 2149 u8 data8;
2206 2150
2207 priv->tlanFullDuplex = false; 2151 priv->tlan_full_duplex = false;
2208 priv->phyOnline=0; 2152 priv->phy_online = 0;
2209 netif_carrier_off(dev); 2153 netif_carrier_off(dev);
2210 2154
2211/* 1. Assert reset bit. */ 2155/* 1. Assert reset bit. */
@@ -2216,7 +2160,7 @@ TLan_ResetAdapter( struct net_device *dev )
2216 2160
2217 udelay(1000); 2161 udelay(1000);
2218 2162
2219/* 2. Turn off interrupts. ( Probably isn't necessary ) */ 2163/* 2. Turn off interrupts. (Probably isn't necessary) */
2220 2164
2221 data = inl(dev->base_addr + TLAN_HOST_CMD); 2165 data = inl(dev->base_addr + TLAN_HOST_CMD);
2222 data |= TLAN_HC_INT_OFF; 2166 data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2168,204 @@ TLan_ResetAdapter( struct net_device *dev )
2224 2168
2225/* 3. Clear AREGs and HASHs. */ 2169/* 3. Clear AREGs and HASHs. */
2226 2170
2227 for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) { 2171 for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
2228 TLan_DioWrite32( dev->base_addr, (u16) i, 0 ); 2172 tlan_dio_write32(dev->base_addr, (u16) i, 0);
2229 }
2230 2173
2231/* 4. Setup NetConfig register. */ 2174/* 4. Setup NetConfig register. */
2232 2175
2233 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2176 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
2234 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data ); 2177 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
2235 2178
2236/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */ 2179/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
2237 2180
2238 outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD ); 2181 outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
2239 outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD ); 2182 outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
2240 2183
2241/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */ 2184/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
2242 2185
2243 outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR ); 2186 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
2244 addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2187 addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
2245 TLan_SetBit( TLAN_NET_SIO_NMRST, addr ); 2188 tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
2246 2189
2247/* 7. Setup the remaining registers. */ 2190/* 7. Setup the remaining registers. */
2248 2191
2249 if ( priv->tlanRev >= 0x30 ) { 2192 if (priv->tlan_rev >= 0x30) {
2250 data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC; 2193 data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
2251 TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 ); 2194 tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
2252 } 2195 }
2253 TLan_PhyDetect( dev ); 2196 tlan_phy_detect(dev);
2254 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN; 2197 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
2255 2198
2256 if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) { 2199 if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
2257 data |= TLAN_NET_CFG_BIT; 2200 data |= TLAN_NET_CFG_BIT;
2258 if ( priv->aui == 1 ) { 2201 if (priv->aui == 1) {
2259 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); 2202 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
2260 } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2203 } else if (priv->duplex == TLAN_DUPLEX_FULL) {
2261 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); 2204 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
2262 priv->tlanFullDuplex = true; 2205 priv->tlan_full_duplex = true;
2263 } else { 2206 } else {
2264 TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); 2207 tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
2265 } 2208 }
2266 } 2209 }
2267 2210
2268 if ( priv->phyNum == 0 ) { 2211 if (priv->phy_num == 0)
2269 data |= TLAN_NET_CFG_PHY_EN; 2212 data |= TLAN_NET_CFG_PHY_EN;
2270 } 2213 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
2271 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
2272 2214
2273 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2215 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
2274 TLan_FinishReset( dev ); 2216 tlan_finish_reset(dev);
2275 } else { 2217 else
2276 TLan_PhyPowerDown( dev ); 2218 tlan_phy_power_down(dev);
2277 }
2278 2219
2279} /* TLan_ResetAdapter */ 2220}
2280 2221
2281 2222
2282 2223
2283 2224
2284static void 2225static void
2285TLan_FinishReset( struct net_device *dev ) 2226tlan_finish_reset(struct net_device *dev)
2286{ 2227{
2287 TLanPrivateInfo *priv = netdev_priv(dev); 2228 struct tlan_priv *priv = netdev_priv(dev);
2288 u8 data; 2229 u8 data;
2289 u32 phy; 2230 u32 phy;
2290 u8 sio; 2231 u8 sio;
2291 u16 status; 2232 u16 status;
2292 u16 partner; 2233 u16 partner;
2293 u16 tlphy_ctl; 2234 u16 tlphy_ctl;
2294 u16 tlphy_par; 2235 u16 tlphy_par;
2295 u16 tlphy_id1, tlphy_id2; 2236 u16 tlphy_id1, tlphy_id2;
2296 int i; 2237 int i;
2297 2238
2298 phy = priv->phy[priv->phyNum]; 2239 phy = priv->phy[priv->phy_num];
2299 2240
2300 data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP; 2241 data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
2301 if ( priv->tlanFullDuplex ) { 2242 if (priv->tlan_full_duplex)
2302 data |= TLAN_NET_CMD_DUPLEX; 2243 data |= TLAN_NET_CMD_DUPLEX;
2303 } 2244 tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
2304 TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
2305 data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5; 2245 data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
2306 if ( priv->phyNum == 0 ) { 2246 if (priv->phy_num == 0)
2307 data |= TLAN_NET_MASK_MASK7; 2247 data |= TLAN_NET_MASK_MASK7;
2308 } 2248 tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
2309 TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data ); 2249 tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
2310 TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 ); 2250 tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
2311 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); 2251 tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
2312 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
2313 2252
2314 if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || 2253 if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
2315 ( priv->aui ) ) { 2254 (priv->aui)) {
2316 status = MII_GS_LINK; 2255 status = MII_GS_LINK;
2317 printk( "TLAN: %s: Link forced.\n", dev->name ); 2256 netdev_info(dev, "Link forced\n");
2318 } else { 2257 } else {
2319 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2258 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2320 udelay( 1000 ); 2259 udelay(1000);
2321 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2260 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2322 if ( (status & MII_GS_LINK) && 2261 if ((status & MII_GS_LINK) &&
2323 /* We only support link info on Nat.Sem. PHYs */ 2262 /* We only support link info on Nat.Sem. PHYs */
2324 (tlphy_id1 == NAT_SEM_ID1) && 2263 (tlphy_id1 == NAT_SEM_ID1) &&
2325 (tlphy_id2 == NAT_SEM_ID2) ) { 2264 (tlphy_id2 == NAT_SEM_ID2)) {
2326 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); 2265 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
2327 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par ); 2266 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
2328 2267
2329 printk( "TLAN: %s: Link active with ", dev->name ); 2268 netdev_info(dev,
2330 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { 2269 "Link active with %s %uMbps %s-Duplex\n",
2331 printk( "forced 10%sMbps %s-Duplex\n", 2270 !(tlphy_par & TLAN_PHY_AN_EN_STAT)
2332 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2271 ? "forced" : "Autonegotiation enabled,",
2333 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2272 tlphy_par & TLAN_PHY_SPEED_100
2334 } else { 2273 ? 100 : 10,
2335 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n", 2274 tlphy_par & TLAN_PHY_DUPLEX_FULL
2336 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2275 ? "Full" : "Half");
2337 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2276
2338 printk("TLAN: Partner capability: "); 2277 if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
2339 for (i = 5; i <= 10; i++) 2278 netdev_info(dev, "Partner capability:");
2340 if (partner & (1<<i)) 2279 for (i = 5; i < 10; i++)
2341 printk("%s",media[i-5]); 2280 if (partner & (1 << i))
2342 printk("\n"); 2281 pr_cont(" %s", media[i-5]);
2282 pr_cont("\n");
2343 } 2283 }
2344 2284
2345 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 2285 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
2286 TLAN_LED_LINK);
2346#ifdef MONITOR 2287#ifdef MONITOR
2347 /* We have link beat..for now anyway */ 2288 /* We have link beat..for now anyway */
2348 priv->link = 1; 2289 priv->link = 1;
2349 /*Enabling link beat monitoring */ 2290 /*Enabling link beat monitoring */
2350 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT ); 2291 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
2351#endif 2292#endif
2352 } else if (status & MII_GS_LINK) { 2293 } else if (status & MII_GS_LINK) {
2353 printk( "TLAN: %s: Link active\n", dev->name ); 2294 netdev_info(dev, "Link active\n");
2354 TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 2295 tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
2296 TLAN_LED_LINK);
2355 } 2297 }
2356 } 2298 }
2357 2299
2358 if ( priv->phyNum == 0 ) { 2300 if (priv->phy_num == 0) {
2359 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 2301 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
2360 tlphy_ctl |= TLAN_TC_INTEN; 2302 tlphy_ctl |= TLAN_TC_INTEN;
2361 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl ); 2303 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
2362 sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO ); 2304 sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
2363 sio |= TLAN_NET_SIO_MINTEN; 2305 sio |= TLAN_NET_SIO_MINTEN;
2364 TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio ); 2306 tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
2365 } 2307 }
2366 2308
2367 if ( status & MII_GS_LINK ) { 2309 if (status & MII_GS_LINK) {
2368 TLan_SetMac( dev, 0, dev->dev_addr ); 2310 tlan_set_mac(dev, 0, dev->dev_addr);
2369 priv->phyOnline = 1; 2311 priv->phy_online = 1;
2370 outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); 2312 outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
2371 if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) { 2313 if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
2372 outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); 2314 outb((TLAN_HC_REQ_INT >> 8),
2373 } 2315 dev->base_addr + TLAN_HOST_CMD + 1);
2374 outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM ); 2316 outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
2375 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); 2317 outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
2376 netif_carrier_on(dev); 2318 netif_carrier_on(dev);
2377 } else { 2319 } else {
2378 printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", 2320 netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
2379 dev->name ); 2321 tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
2380 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
2381 return; 2322 return;
2382 } 2323 }
2383 TLan_SetMulticastList(dev); 2324 tlan_set_multicast_list(dev);
2384 2325
2385} /* TLan_FinishReset */ 2326}
2386 2327
2387 2328
2388 2329
2389 2330
2390 /*************************************************************** 2331/***************************************************************
2391 * TLan_SetMac 2332 * tlan_set_mac
2392 * 2333 *
2393 * Returns: 2334 * Returns:
2394 * Nothing 2335 * Nothing
2395 * Parms: 2336 * Parms:
2396 * dev Pointer to device structure of adapter 2337 * dev Pointer to device structure of adapter
2397 * on which to change the AREG. 2338 * on which to change the AREG.
2398 * areg The AREG to set the address in (0 - 3). 2339 * areg The AREG to set the address in (0 - 3).
2399 * mac A pointer to an array of chars. Each 2340 * mac A pointer to an array of chars. Each
2400 * element stores one byte of the address. 2341 * element stores one byte of the address.
2401 * I.e., it isn't in ASCII. 2342 * I.e., it isn't in ASCII.
2402 * 2343 *
2403 * This function transfers a MAC address to one of the 2344 * This function transfers a MAC address to one of the
2404 * TLAN AREGs (address registers). The TLAN chip locks 2345 * TLAN AREGs (address registers). The TLAN chip locks
2405 * the register on writing to offset 0 and unlocks the 2346 * the register on writing to offset 0 and unlocks the
2406 * register after writing to offset 5. If NULL is passed 2347 * register after writing to offset 5. If NULL is passed
2407 * in mac, then the AREG is filled with 0's. 2348 * in mac, then the AREG is filled with 0's.
2408 * 2349 *
2409 **************************************************************/ 2350 **************************************************************/
2410 2351
2411static void TLan_SetMac( struct net_device *dev, int areg, char *mac ) 2352static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
2412{ 2353{
2413 int i; 2354 int i;
2414 2355
2415 areg *= 6; 2356 areg *= 6;
2416 2357
2417 if ( mac != NULL ) { 2358 if (mac != NULL) {
2418 for ( i = 0; i < 6; i++ ) 2359 for (i = 0; i < 6; i++)
2419 TLan_DioWrite8( dev->base_addr, 2360 tlan_dio_write8(dev->base_addr,
2420 TLAN_AREG_0 + areg + i, mac[i] ); 2361 TLAN_AREG_0 + areg + i, mac[i]);
2421 } else { 2362 } else {
2422 for ( i = 0; i < 6; i++ ) 2363 for (i = 0; i < 6; i++)
2423 TLan_DioWrite8( dev->base_addr, 2364 tlan_dio_write8(dev->base_addr,
2424 TLAN_AREG_0 + areg + i, 0 ); 2365 TLAN_AREG_0 + areg + i, 0);
2425 } 2366 }
2426 2367
2427} /* TLan_SetMac */ 2368}
2428 2369
2429 2370
2430 2371
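As an illustrative usage (not taken from this patch), programming the station address into AREG 0 and blanking AREG 1 would be:

	tlan_set_mac(dev, 0, dev->dev_addr);	/* locks on offset 0, unlocks after offset 5 */
	tlan_set_mac(dev, 1, NULL);		/* a NULL mac fills the AREG with zeros */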
@@ -2432,205 +2373,199 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
2432/***************************************************************************** 2373/*****************************************************************************
2433****************************************************************************** 2374******************************************************************************
2434 2375
2435 ThunderLAN Driver PHY Layer Routines 2376ThunderLAN driver PHY layer routines
2436 2377
2437****************************************************************************** 2378******************************************************************************
2438*****************************************************************************/ 2379*****************************************************************************/
2439 2380
2440 2381
2441 2382
2442 /********************************************************************* 2383/*********************************************************************
2443 * TLan_PhyPrint 2384 * tlan_phy_print
2444 * 2385 *
2445 * Returns: 2386 * Returns:
2446 * Nothing 2387 * Nothing
2447 * Parms: 2388 * Parms:
2448 * dev A pointer to the device structure of the 2389 * dev A pointer to the device structure of the
2449 * TLAN device having the PHYs to be detailed. 2390 * TLAN device having the PHYs to be detailed.
2450 * 2391 *
2451 * This function prints the registers of a PHY (aka transceiver). 2392 * This function prints the registers of a PHY (aka transceiver).
2452 * 2393 *
2453 ********************************************************************/ 2394 ********************************************************************/
2454 2395
2455static void TLan_PhyPrint( struct net_device *dev ) 2396static void tlan_phy_print(struct net_device *dev)
2456{ 2397{
2457 TLanPrivateInfo *priv = netdev_priv(dev); 2398 struct tlan_priv *priv = netdev_priv(dev);
2458 u16 i, data0, data1, data2, data3, phy; 2399 u16 i, data0, data1, data2, data3, phy;
2459 2400
2460 phy = priv->phy[priv->phyNum]; 2401 phy = priv->phy[priv->phy_num];
2461 2402
2462 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2403 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2463 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); 2404 netdev_info(dev, "Unmanaged PHY\n");
2464 } else if ( phy <= TLAN_PHY_MAX_ADDR ) { 2405 } else if (phy <= TLAN_PHY_MAX_ADDR) {
2465 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); 2406 netdev_info(dev, "PHY 0x%02x\n", phy);
2466 printk( "TLAN: Off. +0 +1 +2 +3\n" ); 2407 pr_info(" Off. +0 +1 +2 +3\n");
2467 for ( i = 0; i < 0x20; i+= 4 ) { 2408 for (i = 0; i < 0x20; i += 4) {
2468 printk( "TLAN: 0x%02x", i ); 2409 tlan_mii_read_reg(dev, phy, i, &data0);
2469 TLan_MiiReadReg( dev, phy, i, &data0 ); 2410 tlan_mii_read_reg(dev, phy, i + 1, &data1);
2470 printk( " 0x%04hx", data0 ); 2411 tlan_mii_read_reg(dev, phy, i + 2, &data2);
2471 TLan_MiiReadReg( dev, phy, i + 1, &data1 ); 2412 tlan_mii_read_reg(dev, phy, i + 3, &data3);
2472 printk( " 0x%04hx", data1 ); 2413 pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
2473 TLan_MiiReadReg( dev, phy, i + 2, &data2 ); 2414 i, data0, data1, data2, data3);
2474 printk( " 0x%04hx", data2 );
2475 TLan_MiiReadReg( dev, phy, i + 3, &data3 );
2476 printk( " 0x%04hx\n", data3 );
2477 } 2415 }
2478 } else { 2416 } else {
2479 printk( "TLAN: Device %s, Invalid PHY.\n", dev->name ); 2417 netdev_info(dev, "Invalid PHY\n");
2480 } 2418 }
2481 2419
2482} /* TLan_PhyPrint */ 2420}
2483 2421
2484 2422
2485 2423
2486 2424
2487 /********************************************************************* 2425/*********************************************************************
2488 * TLan_PhyDetect 2426 * tlan_phy_detect
2489 * 2427 *
2490 * Returns: 2428 * Returns:
2491 * Nothing 2429 * Nothing
2492 * Parms: 2430 * Parms:
2493 * dev A pointer to the device structure of the adapter 2431 * dev A pointer to the device structure of the adapter
2494 * for which the PHY needs to be determined. 2432 * for which the PHY needs to be determined.
2495 * 2433 *
2496 * So far I've found that adapters which have external PHYs 2434 * So far I've found that adapters which have external PHYs
2497 * may also use the internal PHY for part of the functionality. 2435 * may also use the internal PHY for part of the functionality.
2498 * (e.g., AUI/Thinnet). This function finds out if this TLAN 2436 * (e.g., AUI/Thinnet). This function finds out if this TLAN
2499 * chip has an internal PHY, and then finds the first external 2437 * chip has an internal PHY, and then finds the first external
2500 * PHY (starting from address 0), if it exists. 2438 * PHY (starting from address 0), if it exists.
2501 * 2439 *
2502 ********************************************************************/ 2440 ********************************************************************/
2503 2441
2504static void TLan_PhyDetect( struct net_device *dev ) 2442static void tlan_phy_detect(struct net_device *dev)
2505{ 2443{
2506 TLanPrivateInfo *priv = netdev_priv(dev); 2444 struct tlan_priv *priv = netdev_priv(dev);
2507 u16 control; 2445 u16 control;
2508 u16 hi; 2446 u16 hi;
2509 u16 lo; 2447 u16 lo;
2510 u32 phy; 2448 u32 phy;
2511 2449
2512 if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { 2450 if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
2513 priv->phyNum = 0xFFFF; 2451 priv->phy_num = 0xffff;
2514 return; 2452 return;
2515 } 2453 }
2516 2454
2517 TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi ); 2455 tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
2518 2456
2519 if ( hi != 0xFFFF ) { 2457 if (hi != 0xffff)
2520 priv->phy[0] = TLAN_PHY_MAX_ADDR; 2458 priv->phy[0] = TLAN_PHY_MAX_ADDR;
2521 } else { 2459 else
2522 priv->phy[0] = TLAN_PHY_NONE; 2460 priv->phy[0] = TLAN_PHY_NONE;
2523 }
2524 2461
2525 priv->phy[1] = TLAN_PHY_NONE; 2462 priv->phy[1] = TLAN_PHY_NONE;
2526 for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) { 2463 for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
2527 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); 2464 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
2528 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); 2465 tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
2529 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo ); 2466 tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
2530 if ( ( control != 0xFFFF ) || 2467 if ((control != 0xffff) ||
2531 ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { 2468 (hi != 0xffff) || (lo != 0xffff)) {
2532 TLAN_DBG( TLAN_DEBUG_GNRL, 2469 TLAN_DBG(TLAN_DEBUG_GNRL,
2533 "PHY found at %02x %04x %04x %04x\n", 2470 "PHY found at %02x %04x %04x %04x\n",
2534 phy, control, hi, lo ); 2471 phy, control, hi, lo);
2535 if ( ( priv->phy[1] == TLAN_PHY_NONE ) && 2472 if ((priv->phy[1] == TLAN_PHY_NONE) &&
2536 ( phy != TLAN_PHY_MAX_ADDR ) ) { 2473 (phy != TLAN_PHY_MAX_ADDR)) {
2537 priv->phy[1] = phy; 2474 priv->phy[1] = phy;
2538 } 2475 }
2539 } 2476 }
2540 } 2477 }
2541 2478
2542 if ( priv->phy[1] != TLAN_PHY_NONE ) { 2479 if (priv->phy[1] != TLAN_PHY_NONE)
2543 priv->phyNum = 1; 2480 priv->phy_num = 1;
2544 } else if ( priv->phy[0] != TLAN_PHY_NONE ) { 2481 else if (priv->phy[0] != TLAN_PHY_NONE)
2545 priv->phyNum = 0; 2482 priv->phy_num = 0;
2546 } else { 2483 else
2547 printk( "TLAN: Cannot initialize device, no PHY was found!\n" ); 2484 netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
2548 }
2549 2485
2550} /* TLan_PhyDetect */ 2486}
2551 2487
2552 2488
2553 2489
2554 2490
2555static void TLan_PhyPowerDown( struct net_device *dev ) 2491static void tlan_phy_power_down(struct net_device *dev)
2556{ 2492{
2557 TLanPrivateInfo *priv = netdev_priv(dev); 2493 struct tlan_priv *priv = netdev_priv(dev);
2558 u16 value; 2494 u16 value;
2559 2495
2560 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name ); 2496 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
2561 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; 2497 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
2562 TLan_MiiSync( dev->base_addr ); 2498 tlan_mii_sync(dev->base_addr);
2563 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2499 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2564 if ( ( priv->phyNum == 0 ) && 2500 if ((priv->phy_num == 0) &&
2565 ( priv->phy[1] != TLAN_PHY_NONE ) && 2501 (priv->phy[1] != TLAN_PHY_NONE) &&
2566 ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { 2502 (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
2567 TLan_MiiSync( dev->base_addr ); 2503 tlan_mii_sync(dev->base_addr);
2568 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); 2504 tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
2569 } 2505 }
2570 2506
2571 /* Wait for 50 ms and powerup 2507 /* Wait for 50 ms and powerup
2572 * This is arbitrary. It is intended to make sure the 2508 * This is arbitrary. It is intended to make sure the
2573 * transceiver settles. 2509 * transceiver settles.
2574 */ 2510 */
2575 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP ); 2511 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
2576 2512
2577} /* TLan_PhyPowerDown */ 2513}
2578 2514
2579 2515
2580 2516
2581 2517
2582static void TLan_PhyPowerUp( struct net_device *dev ) 2518static void tlan_phy_power_up(struct net_device *dev)
2583{ 2519{
2584 TLanPrivateInfo *priv = netdev_priv(dev); 2520 struct tlan_priv *priv = netdev_priv(dev);
2585 u16 value; 2521 u16 value;
2586 2522
2587 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name ); 2523 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
2588 TLan_MiiSync( dev->base_addr ); 2524 tlan_mii_sync(dev->base_addr);
2589 value = MII_GC_LOOPBK; 2525 value = MII_GC_LOOPBK;
2590 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2526 tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
2591 TLan_MiiSync(dev->base_addr); 2527 tlan_mii_sync(dev->base_addr);
2592 /* Wait for 500 ms and reset the 2528 /* Wait for 500 ms and reset the
2593 * transceiver. The TLAN docs say both 50 ms and 2529 * transceiver. The TLAN docs say both 50 ms and
2594 * 500 ms, so do the longer, just in case. 2530 * 500 ms, so do the longer, just in case.
2595 */ 2531 */
2596 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET ); 2532 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
2597 2533
2598} /* TLan_PhyPowerUp */ 2534}
2599 2535
2600 2536
2601 2537
2602 2538
2603static void TLan_PhyReset( struct net_device *dev ) 2539static void tlan_phy_reset(struct net_device *dev)
2604{ 2540{
2605 TLanPrivateInfo *priv = netdev_priv(dev); 2541 struct tlan_priv *priv = netdev_priv(dev);
2606 u16 phy; 2542 u16 phy;
2607 u16 value; 2543 u16 value;
2608 2544
2609 phy = priv->phy[priv->phyNum]; 2545 phy = priv->phy[priv->phy_num];
2610 2546
2611 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name ); 2547 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
2612 TLan_MiiSync( dev->base_addr ); 2548 tlan_mii_sync(dev->base_addr);
2613 value = MII_GC_LOOPBK | MII_GC_RESET; 2549 value = MII_GC_LOOPBK | MII_GC_RESET;
2614 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value ); 2550 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
2615 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); 2551 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2616 while ( value & MII_GC_RESET ) { 2552 while (value & MII_GC_RESET)
2617 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); 2553 tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
2618 }
2619 2554
2620 /* Wait for 500 ms and initialize. 2555 /* Wait for 500 ms and initialize.
2621 * I don't remember why I wait this long. 2556 * I don't remember why I wait this long.
2622 * I've changed this to 50ms, as it seems long enough. 2557 * I've changed this to 50ms, as it seems long enough.
2623 */ 2558 */
2624 TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK ); 2559 tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
2625 2560
2626} /* TLan_PhyReset */ 2561}
2627 2562
2628 2563
2629 2564
2630 2565
2631static void TLan_PhyStartLink( struct net_device *dev ) 2566static void tlan_phy_start_link(struct net_device *dev)
2632{ 2567{
2633 TLanPrivateInfo *priv = netdev_priv(dev); 2568 struct tlan_priv *priv = netdev_priv(dev);
2634 u16 ability; 2569 u16 ability;
2635 u16 control; 2570 u16 control;
2636 u16 data; 2571 u16 data;
@@ -2638,86 +2573,87 @@ static void TLan_PhyStartLink( struct net_device *dev )
2638 u16 status; 2573 u16 status;
2639 u16 tctl; 2574 u16 tctl;
2640 2575
2641 phy = priv->phy[priv->phyNum]; 2576 phy = priv->phy[priv->phy_num];
2642 TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name ); 2577 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
2643 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2578 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2644 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability ); 2579 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
2645 2580
2646 if ( ( status & MII_GS_AUTONEG ) && 2581 if ((status & MII_GS_AUTONEG) &&
2647 ( ! priv->aui ) ) { 2582 (!priv->aui)) {
2648 ability = status >> 11; 2583 ability = status >> 11;
2649 if ( priv->speed == TLAN_SPEED_10 && 2584 if (priv->speed == TLAN_SPEED_10 &&
2650 priv->duplex == TLAN_DUPLEX_HALF) { 2585 priv->duplex == TLAN_DUPLEX_HALF) {
2651 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); 2586 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
2652 } else if ( priv->speed == TLAN_SPEED_10 && 2587 } else if (priv->speed == TLAN_SPEED_10 &&
2653 priv->duplex == TLAN_DUPLEX_FULL) { 2588 priv->duplex == TLAN_DUPLEX_FULL) {
2654 priv->tlanFullDuplex = true; 2589 priv->tlan_full_duplex = true;
2655 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); 2590 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
2656 } else if ( priv->speed == TLAN_SPEED_100 && 2591 } else if (priv->speed == TLAN_SPEED_100 &&
2657 priv->duplex == TLAN_DUPLEX_HALF) { 2592 priv->duplex == TLAN_DUPLEX_HALF) {
2658 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); 2593 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
2659 } else if ( priv->speed == TLAN_SPEED_100 && 2594 } else if (priv->speed == TLAN_SPEED_100 &&
2660 priv->duplex == TLAN_DUPLEX_FULL) { 2595 priv->duplex == TLAN_DUPLEX_FULL) {
2661 priv->tlanFullDuplex = true; 2596 priv->tlan_full_duplex = true;
2662 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); 2597 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
2663 } else { 2598 } else {
2664 2599
2665 /* Set Auto-Neg advertisement */ 2600 /* Set Auto-Neg advertisement */
2666 TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1); 2601 tlan_mii_write_reg(dev, phy, MII_AN_ADV,
2602 (ability << 5) | 1);
2667	 /* Enable Auto-Neg */ 2603	 /* Enable Auto-Neg */
2668 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 ); 2604 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
2669 /* Restart Auto-Neg */ 2605 /* Restart Auto-Neg */
2670 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 ); 2606 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
2671 /* Wait for 4 sec for autonegotiation 2607 /* Wait for 4 sec for autonegotiation
2672 * to complete. The max spec time is less than this 2608 * to complete. The max spec time is less than this
2673	 * but the card needs additional time to start AN. 2609	 * but the card needs additional time to start AN.
2674 * .5 sec should be plenty extra. 2610 * .5 sec should be plenty extra.
2675 */ 2611 */
2676 printk( "TLAN: %s: Starting autonegotiation.\n", dev->name ); 2612 netdev_info(dev, "Starting autonegotiation\n");
2677 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2613 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
2678 return; 2614 return;
2679 } 2615 }
2680 2616
2681 } 2617 }
2682 2618
2683 if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) { 2619 if ((priv->aui) && (priv->phy_num != 0)) {
2684 priv->phyNum = 0; 2620 priv->phy_num = 0;
2685 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2621 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2686 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2622 | TLAN_NET_CFG_PHY_EN;
2687 TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN ); 2623 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2624 tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
2688 return; 2625 return;
2689 } else if ( priv->phyNum == 0 ) { 2626 } else if (priv->phy_num == 0) {
2690 control = 0; 2627 control = 0;
2691 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl ); 2628 tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
2692 if ( priv->aui ) { 2629 if (priv->aui) {
2693 tctl |= TLAN_TC_AUISEL; 2630 tctl |= TLAN_TC_AUISEL;
2694 } else { 2631 } else {
2695 tctl &= ~TLAN_TC_AUISEL; 2632 tctl &= ~TLAN_TC_AUISEL;
2696 if ( priv->duplex == TLAN_DUPLEX_FULL ) { 2633 if (priv->duplex == TLAN_DUPLEX_FULL) {
2697 control |= MII_GC_DUPLEX; 2634 control |= MII_GC_DUPLEX;
2698 priv->tlanFullDuplex = true; 2635 priv->tlan_full_duplex = true;
2699 } 2636 }
2700 if ( priv->speed == TLAN_SPEED_100 ) { 2637 if (priv->speed == TLAN_SPEED_100)
2701 control |= MII_GC_SPEEDSEL; 2638 control |= MII_GC_SPEEDSEL;
2702 }
2703 } 2639 }
2704 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control ); 2640 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
2705 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl ); 2641 tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
2706 } 2642 }
2707 2643
2708 /* Wait for 2 sec to give the transceiver time 2644 /* Wait for 2 sec to give the transceiver time
2709 * to establish link. 2645 * to establish link.
2710 */ 2646 */
2711 TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET ); 2647 tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
2712 2648
2713} /* TLan_PhyStartLink */ 2649}
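
	The forced-mode branches in tlan_phy_start_link() above write MII_GEN_CTL with the literals 0x0000, 0x0100, 0x2000 and 0x2100. Expressed with the MII_GC_* masks from tlan.h these are just a speed bit plus a duplex bit; a minimal sketch of that mapping (the helper name is made up for illustration, and MII_GC_DUPLEX is assumed to be the usual 0x0100):

	static u16 tlan_forced_gen_ctl(u32 speed, u32 duplex)
	{
		u16 ctl = 0;

		if (speed == TLAN_SPEED_100)
			ctl |= MII_GC_SPEEDSEL;		/* 0x2000 */
		if (duplex == TLAN_DUPLEX_FULL)
			ctl |= MII_GC_DUPLEX;		/* assumed 0x0100 */

		return ctl;	/* 0x0000, 0x0100, 0x2000 or 0x2100 as above */
	}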
2714 2650
2715 2651
2716 2652
2717 2653
2718static void TLan_PhyFinishAutoNeg( struct net_device *dev ) 2654static void tlan_phy_finish_auto_neg(struct net_device *dev)
2719{ 2655{
2720 TLanPrivateInfo *priv = netdev_priv(dev); 2656 struct tlan_priv *priv = netdev_priv(dev);
2721 u16 an_adv; 2657 u16 an_adv;
2722 u16 an_lpa; 2658 u16 an_lpa;
2723 u16 data; 2659 u16 data;
@@ -2725,115 +2661,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
2725 u16 phy; 2661 u16 phy;
2726 u16 status; 2662 u16 status;
2727 2663
2728 phy = priv->phy[priv->phyNum]; 2664 phy = priv->phy[priv->phy_num];
2729 2665
2730 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2666 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2731 udelay( 1000 ); 2667 udelay(1000);
2732 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2668 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
2733 2669
2734 if ( ! ( status & MII_GS_AUTOCMPLT ) ) { 2670 if (!(status & MII_GS_AUTOCMPLT)) {
2735 /* Wait for 8 sec to give the process 2671 /* Wait for 8 sec to give the process
2736 * more time. Perhaps we should fail after a while. 2672 * more time. Perhaps we should fail after a while.
2737 */ 2673 */
2738 if (!priv->neg_be_verbose++) { 2674 if (!priv->neg_be_verbose++) {
2739 pr_info("TLAN: Giving autonegotiation more time.\n"); 2675 pr_info("Giving autonegotiation more time.\n");
2740 pr_info("TLAN: Please check that your adapter has\n"); 2676 pr_info("Please check that your adapter has\n");
2741 pr_info("TLAN: been properly connected to a HUB or Switch.\n"); 2677 pr_info("been properly connected to a HUB or Switch.\n");
2742 pr_info("TLAN: Trying to establish link in the background...\n"); 2678 pr_info("Trying to establish link in the background...\n");
2743 } 2679 }
2744 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2680 tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
2745 return; 2681 return;
2746 } 2682 }
2747 2683
2748 printk( "TLAN: %s: Autonegotiation complete.\n", dev->name ); 2684 netdev_info(dev, "Autonegotiation complete\n");
2749 TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv ); 2685 tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
2750 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); 2686 tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
2751 mode = an_adv & an_lpa & 0x03E0; 2687 mode = an_adv & an_lpa & 0x03E0;
2752 if ( mode & 0x0100 ) { 2688 if (mode & 0x0100)
2753 priv->tlanFullDuplex = true; 2689 priv->tlan_full_duplex = true;
2754 } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { 2690 else if (!(mode & 0x0080) && (mode & 0x0040))
2755 priv->tlanFullDuplex = true; 2691 priv->tlan_full_duplex = true;
2756 } 2692
2757 2693 if ((!(mode & 0x0180)) &&
2758 if ( ( ! ( mode & 0x0180 ) ) && 2694 (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
2759 ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && 2695 (priv->phy_num != 0)) {
2760 ( priv->phyNum != 0 ) ) { 2696 priv->phy_num = 0;
2761 priv->phyNum = 0; 2697 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
2762 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2698 | TLAN_NET_CFG_PHY_EN;
2763 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); 2699 tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
2764 TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN ); 2700 tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
2765 return; 2701 return;
2766 } 2702 }
2767 2703
2768 if ( priv->phyNum == 0 ) { 2704 if (priv->phy_num == 0) {
2769 if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || 2705 if ((priv->duplex == TLAN_DUPLEX_FULL) ||
2770 ( an_adv & an_lpa & 0x0040 ) ) { 2706 (an_adv & an_lpa & 0x0040)) {
2771 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 2707 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2772 MII_GC_AUTOENB | MII_GC_DUPLEX ); 2708 MII_GC_AUTOENB | MII_GC_DUPLEX);
2773 pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" ); 2709 netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
2774 } else { 2710 } else {
2775 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); 2711 tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
2776 pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); 2712 MII_GC_AUTOENB);
2713 netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
2777 } 2714 }
2778 } 2715 }
2779 2716
2780	 /* Wait for 100 ms. No reason in particular. 2717	 /* Wait for 100 ms. No reason in particular.
2781 */ 2718 */
2782 TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET ); 2719 tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
2783 2720
2784} /* TLan_PhyFinishAutoNeg */ 2721}
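
	The duplex resolution above works on the bitwise AND of the local advertisement and the link-partner ability: mask 0x03E0 keeps the technology-ability field, bit 0x0100 is 100BASE-TX full duplex, 0x0080 is 100BASE-TX half and 0x0040 is 10BASE-T full (standard 802.3 bit layout). A standalone restatement of the same test, with hypothetical names:

	#include <stdbool.h>
	#include <stdint.h>

	/* same decision as tlan_phy_finish_auto_neg(): full duplex if the
	 * common mode is 100-full, or 10-full with no 100-half available */
	static bool an_common_mode_is_full_duplex(uint16_t an_adv, uint16_t an_lpa)
	{
		uint16_t mode = an_adv & an_lpa & 0x03e0;

		if (mode & 0x0100)
			return true;
		return !(mode & 0x0080) && (mode & 0x0040);
	}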
2785 2722
2786#ifdef MONITOR 2723#ifdef MONITOR
2787 2724
2788 /********************************************************************* 2725/*********************************************************************
2789 * 2726 *
2790 * TLan_phyMonitor 2727 * tlan_phy_monitor
2791 * 2728 *
2792 * Returns: 2729 * Returns:
2793 * None 2730 * None
2794 * 2731 *
2795 * Params: 2732 * Params:
2796 * dev The device structure of this device. 2733 * dev The device structure of this device.
2797 * 2734 *
2798 * 2735 *
2799 * This function monitors PHY condition by reading the status 2736 * This function monitors PHY condition by reading the status
2800 * register via the MII bus. This can be used to give info 2737 * register via the MII bus. This can be used to give info
2801 * about link changes (up/down), and possible switch to alternate 2738 * about link changes (up/down), and possible switch to alternate
2802 * media. 2739 * media.
2803 * 2740 *
2804 * ******************************************************************/ 2741 *******************************************************************/
2805 2742
2806void TLan_PhyMonitor( struct net_device *dev ) 2743void tlan_phy_monitor(struct net_device *dev)
2807{ 2744{
2808 TLanPrivateInfo *priv = netdev_priv(dev); 2745 struct tlan_priv *priv = netdev_priv(dev);
2809 u16 phy; 2746 u16 phy;
2810 u16 phy_status; 2747 u16 phy_status;
2811 2748
2812 phy = priv->phy[priv->phyNum]; 2749 phy = priv->phy[priv->phy_num];
2813 2750
2814 /* Get PHY status register */ 2751 /* Get PHY status register */
2815 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status ); 2752 tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
2816 2753
2817 /* Check if link has been lost */ 2754 /* Check if link has been lost */
2818 if (!(phy_status & MII_GS_LINK)) { 2755 if (!(phy_status & MII_GS_LINK)) {
2819 if (priv->link) { 2756 if (priv->link) {
2820 priv->link = 0; 2757 priv->link = 0;
2821 printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name); 2758 printk(KERN_DEBUG "TLAN: %s has lost link\n",
2822 netif_carrier_off(dev); 2759 dev->name);
2823 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); 2760 netif_carrier_off(dev);
2824 return; 2761 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
2762 return;
2825 } 2763 }
2826 } 2764 }
2827 2765
2828	 /* Link reestablished? */ 2766	 /* Link reestablished? */
2829 if ((phy_status & MII_GS_LINK) && !priv->link) { 2767 if ((phy_status & MII_GS_LINK) && !priv->link) {
2830 priv->link = 1; 2768 priv->link = 1;
2831 printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name); 2769 printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
2770 dev->name);
2832 netif_carrier_on(dev); 2771 netif_carrier_on(dev);
2833 } 2772 }
2834 2773
2835 /* Setup a new monitor */ 2774 /* Setup a new monitor */
2836 TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); 2775 tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
2837} 2776}
2838 2777
2839#endif /* MONITOR */ 2778#endif /* MONITOR */
@@ -2842,47 +2781,48 @@ void TLan_PhyMonitor( struct net_device *dev )
2842/***************************************************************************** 2781/*****************************************************************************
2843****************************************************************************** 2782******************************************************************************
2844 2783
2845 ThunderLAN Driver MII Routines 2784ThunderLAN driver MII routines
2846 2785
2847 These routines are based on the information in Chap. 2 of the 2786these routines are based on the information in chap. 2 of the
2848 "ThunderLAN Programmer's Guide", pp. 15-24. 2787"ThunderLAN Programmer's Guide", pp. 15-24.
2849 2788
2850****************************************************************************** 2789******************************************************************************
2851*****************************************************************************/ 2790*****************************************************************************/
2852 2791
2853 2792
2854 /*************************************************************** 2793/***************************************************************
2855 * TLan_MiiReadReg 2794 * tlan_mii_read_reg
2856 * 2795 *
2857 * Returns: 2796 * Returns:
2858 * false if ack received ok 2797 * false if ack received ok
2859 * true if no ack received or other error 2798 * true if no ack received or other error
2860 * 2799 *
2861 * Parms: 2800 * Parms:
2862 * dev The device structure containing 2801 * dev The device structure containing
2863 * The io address and interrupt count 2802 * The io address and interrupt count
2864 * for this device. 2803 * for this device.
2865 * phy The address of the PHY to be queried. 2804 * phy The address of the PHY to be queried.
2866 * reg The register whose contents are to be 2805 * reg The register whose contents are to be
2867 * retrieved. 2806 * retrieved.
2868 * val A pointer to a variable to store the 2807 * val A pointer to a variable to store the
2869 * retrieved value. 2808 * retrieved value.
2870 * 2809 *
2871 * This function uses the TLAN's MII bus to retrieve the contents 2810 * This function uses the TLAN's MII bus to retrieve the contents
2872 * of a given register on a PHY. It sends the appropriate info 2811 * of a given register on a PHY. It sends the appropriate info
2873 * and then reads the 16-bit register value from the MII bus via 2812 * and then reads the 16-bit register value from the MII bus via
2874 * the TLAN SIO register. 2813 * the TLAN SIO register.
2875 * 2814 *
2876 **************************************************************/ 2815 **************************************************************/
2877 2816
2878static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) 2817static bool
2818tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
2879{ 2819{
2880 u8 nack; 2820 u8 nack;
2881 u16 sio, tmp; 2821 u16 sio, tmp;
2882 u32 i; 2822 u32 i;
2883 bool err; 2823 bool err;
2884 int minten; 2824 int minten;
2885 TLanPrivateInfo *priv = netdev_priv(dev); 2825 struct tlan_priv *priv = netdev_priv(dev);
2886 unsigned long flags = 0; 2826 unsigned long flags = 0;
2887 2827
2888 err = false; 2828 err = false;
@@ -2892,48 +2832,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
2892 if (!in_irq()) 2832 if (!in_irq())
2893 spin_lock_irqsave(&priv->lock, flags); 2833 spin_lock_irqsave(&priv->lock, flags);
2894 2834
2895 TLan_MiiSync(dev->base_addr); 2835 tlan_mii_sync(dev->base_addr);
2896 2836
2897 minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); 2837 minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
2898 if ( minten ) 2838 if (minten)
2899 TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio); 2839 tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
2900 2840
2901 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ 2841 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
2902 TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */ 2842 tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
2903 TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ 2843 tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
2904 TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ 2844 tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
2905 2845
2906 2846
2907 TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */ 2847 tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
2908 2848
2909 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */ 2849 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
2910 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2850 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2911 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */ 2851 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
2912 2852
2913 nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */ 2853 nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
2914 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */ 2854 tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
2915 if (nack) { /* No ACK, so fake it */ 2855 if (nack) { /* no ACK, so fake it */
2916 for (i = 0; i < 16; i++) { 2856 for (i = 0; i < 16; i++) {
2917 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 2857 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2918 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2858 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2919 } 2859 }
2920 tmp = 0xffff; 2860 tmp = 0xffff;
2921 err = true; 2861 err = true;
2922 } else { /* ACK, so read data */ 2862 } else { /* ACK, so read data */
2923 for (tmp = 0, i = 0x8000; i; i >>= 1) { 2863 for (tmp = 0, i = 0x8000; i; i >>= 1) {
2924 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); 2864 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2925 if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio)) 2865 if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
2926 tmp |= i; 2866 tmp |= i;
2927 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2867 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2928 } 2868 }
2929 } 2869 }
2930 2870
2931 2871
2932 TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */ 2872 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
2933 TLan_SetBit(TLAN_NET_SIO_MCLK, sio); 2873 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2934 2874
2935 if ( minten ) 2875 if (minten)
2936 TLan_SetBit(TLAN_NET_SIO_MINTEN, sio); 2876 tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
2937 2877
2938 *val = tmp; 2878 *val = tmp;
2939 2879
@@ -2942,116 +2882,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
2942 2882
2943 return err; 2883 return err;
2944 2884
2945} /* TLan_MiiReadReg */ 2885}
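
	The four tlan_mii_send_data() calls above shift out the header of a standard clause-22 MII read frame, most significant bit first, after the 32-bit preamble from tlan_mii_sync(). Packed into a single word purely for illustration (the driver itself never builds such a word):

	#include <stdint.h>

	/* 14 header bits of an MII read: start 01b, opcode 10b,
	 * 5-bit PHY address, 5-bit register address */
	static uint16_t mii_read_header(uint8_t phy, uint8_t reg)
	{
		return (0x1u << 12) |			/* start    (01b) */
		       (0x2u << 10) |			/* read     (10b) */
		       ((uint16_t)(phy & 0x1f) << 5) |	/* device #       */
		       (reg & 0x1f);			/* register #     */
	}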
2946 2886
2947 2887
2948 2888
2949 2889
2950 /*************************************************************** 2890/***************************************************************
2951 * TLan_MiiSendData 2891 * tlan_mii_send_data
2952 * 2892 *
2953 * Returns: 2893 * Returns:
2954 * Nothing 2894 * Nothing
2955 * Parms: 2895 * Parms:
2956 * base_port The base IO port of the adapter in 2896 * base_port The base IO port of the adapter in
2957 * question. 2897 * question.
2958 * dev The address of the PHY to be queried. 2898 * dev The address of the PHY to be queried.
2959 * data The value to be placed on the MII bus. 2899 * data The value to be placed on the MII bus.
2960 * num_bits The number of bits in data that are to 2900 * num_bits The number of bits in data that are to
2961 * be placed on the MII bus. 2901 * be placed on the MII bus.
2962 * 2902 *
2963	 * This function sends a sequence of bits on the MII 2903	 * This function sends a sequence of bits on the MII
2964 * configuration bus. 2904 * configuration bus.
2965 * 2905 *
2966 **************************************************************/ 2906 **************************************************************/
2967 2907
2968static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) 2908static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
2969{ 2909{
2970 u16 sio; 2910 u16 sio;
2971 u32 i; 2911 u32 i;
2972 2912
2973 if ( num_bits == 0 ) 2913 if (num_bits == 0)
2974 return; 2914 return;
2975 2915
2976 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); 2916 outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
2977 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; 2917 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
2978 TLan_SetBit( TLAN_NET_SIO_MTXEN, sio ); 2918 tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
2979 2919
2980 for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) { 2920 for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
2981 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); 2921 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
2982 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); 2922 (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
2983 if ( data & i ) 2923 if (data & i)
2984 TLan_SetBit( TLAN_NET_SIO_MDATA, sio ); 2924 tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
2985 else 2925 else
2986 TLan_ClearBit( TLAN_NET_SIO_MDATA, sio ); 2926 tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
2987 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 2927 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
2988 (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); 2928 (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
2989 } 2929 }
2990 2930
2991} /* TLan_MiiSendData */ 2931}
2992 2932
2993 2933
2994 2934
2995 2935
2996 /*************************************************************** 2936/***************************************************************
2997	 * TLan_MiiSync 2937	 * tlan_mii_sync
2998 * 2938 *
2999 * Returns: 2939 * Returns:
3000 * Nothing 2940 * Nothing
3001 * Parms: 2941 * Parms:
3002 * base_port The base IO port of the adapter in 2942 * base_port The base IO port of the adapter in
3003 * question. 2943 * question.
3004 * 2944 *
3005	 * This function syncs all PHYs on the MII configuration 2945	 * This function syncs all PHYs on the MII configuration
3006 * bus. 2946 * bus.
3007 * 2947 *
3008 **************************************************************/ 2948 **************************************************************/
3009 2949
3010static void TLan_MiiSync( u16 base_port ) 2950static void tlan_mii_sync(u16 base_port)
3011{ 2951{
3012 int i; 2952 int i;
3013 u16 sio; 2953 u16 sio;
3014 2954
3015 outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); 2955 outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
3016 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; 2956 sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
3017 2957
3018 TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio ); 2958 tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
3019 for ( i = 0; i < 32; i++ ) { 2959 for (i = 0; i < 32; i++) {
3020 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); 2960 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
3021 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 2961 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
3022 } 2962 }
3023 2963
3024} /* TLan_MiiSync */ 2964}
3025 2965
3026 2966
3027 2967
3028 2968
3029 /*************************************************************** 2969/***************************************************************
3030 * TLan_MiiWriteReg 2970 * tlan_mii_write_reg
3031 * 2971 *
3032 * Returns: 2972 * Returns:
3033 * Nothing 2973 * Nothing
3034 * Parms: 2974 * Parms:
3035 * dev The device structure for the device 2975 * dev The device structure for the device
3036 * to write to. 2976 * to write to.
3037 * phy The address of the PHY to be written to. 2977 * phy The address of the PHY to be written to.
3038 * reg The register whose contents are to be 2978 * reg The register whose contents are to be
3039 * written. 2979 * written.
3040 * val The value to be written to the register. 2980 * val The value to be written to the register.
3041 * 2981 *
3042 * This function uses the TLAN's MII bus to write the contents of a 2982 * This function uses the TLAN's MII bus to write the contents of a
3043 * given register on a PHY. It sends the appropriate info and then 2983 * given register on a PHY. It sends the appropriate info and then
3044 * writes the 16-bit register value from the MII configuration bus 2984 * writes the 16-bit register value from the MII configuration bus
3045 * via the TLAN SIO register. 2985 * via the TLAN SIO register.
3046 * 2986 *
3047 **************************************************************/ 2987 **************************************************************/
3048 2988
3049static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) 2989static void
2990tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
3050{ 2991{
3051 u16 sio; 2992 u16 sio;
3052 int minten; 2993 int minten;
3053 unsigned long flags = 0; 2994 unsigned long flags = 0;
3054 TLanPrivateInfo *priv = netdev_priv(dev); 2995 struct tlan_priv *priv = netdev_priv(dev);
3055 2996
3056 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); 2997 outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
3057 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; 2998 sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3000,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
3059 if (!in_irq()) 3000 if (!in_irq())
3060 spin_lock_irqsave(&priv->lock, flags); 3001 spin_lock_irqsave(&priv->lock, flags);
3061 3002
3062 TLan_MiiSync( dev->base_addr ); 3003 tlan_mii_sync(dev->base_addr);
3063 3004
3064 minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); 3005 minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
3065 if ( minten ) 3006 if (minten)
3066 TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio ); 3007 tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
3067 3008
3068 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ 3009 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
3069 TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */ 3010 tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
3070 TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ 3011 tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
3071 TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ 3012 tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
3072 3013
3073 TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */ 3014 tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
3074 TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */ 3015 tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
3075 3016
3076 TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */ 3017 tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
3077 TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); 3018 tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
3078 3019
3079 if ( minten ) 3020 if (minten)
3080 TLan_SetBit( TLAN_NET_SIO_MINTEN, sio ); 3021 tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
3081 3022
3082 if (!in_irq()) 3023 if (!in_irq())
3083 spin_unlock_irqrestore(&priv->lock, flags); 3024 spin_unlock_irqrestore(&priv->lock, flags);
3084 3025
3085} /* TLan_MiiWriteReg */ 3026}
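
	A typical caller pairs the two routines for a read-modify-write of a PHY register, as tlan_phy_start_link() does with TLAN_TLPHY_CTL above; for example (assuming dev and phy are already set up):

	u16 tctl;

	tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
	tctl &= ~TLAN_TC_AUISEL;	/* select MII rather than AUI */
	tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);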
3086 3027
3087 3028
3088 3029
@@ -3090,229 +3031,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
3090/***************************************************************************** 3031/*****************************************************************************
3091****************************************************************************** 3032******************************************************************************
3092 3033
3093 ThunderLAN Driver Eeprom routines 3034ThunderLAN driver eeprom routines
3094 3035
3095 The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A 3036the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A
3096 EEPROM. These functions are based on information in Microchip's 3037EEPROM. these functions are based on information in microchip's
3097	 data sheet. I don't know how well these functions will work with 3038data sheet. I don't know how well these functions will work with
3098	 other EEPROMs. 3039other EEPROMs.
3099 3040
3100****************************************************************************** 3041******************************************************************************
3101*****************************************************************************/ 3042*****************************************************************************/
3102 3043
3103 3044
3104 /*************************************************************** 3045/***************************************************************
3105 * TLan_EeSendStart 3046 * tlan_ee_send_start
3106 * 3047 *
3107 * Returns: 3048 * Returns:
3108 * Nothing 3049 * Nothing
3109 * Parms: 3050 * Parms:
3110 * io_base The IO port base address for the 3051 * io_base The IO port base address for the
3111 * TLAN device with the EEPROM to 3052 * TLAN device with the EEPROM to
3112 * use. 3053 * use.
3113 * 3054 *
3114 * This function sends a start cycle to an EEPROM attached 3055 * This function sends a start cycle to an EEPROM attached
3115 * to a TLAN chip. 3056 * to a TLAN chip.
3116 * 3057 *
3117 **************************************************************/ 3058 **************************************************************/
3118 3059
3119static void TLan_EeSendStart( u16 io_base ) 3060static void tlan_ee_send_start(u16 io_base)
3120{ 3061{
3121 u16 sio; 3062 u16 sio;
3122 3063
3123 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3064 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3124 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3065 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3125 3066
3126 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3067 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3127 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3068 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3128 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3069 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3129 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3070 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3130 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3071 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3131 3072
3132} /* TLan_EeSendStart */ 3073}
3133 3074
3134 3075
3135 3076
3136 3077
3137 /*************************************************************** 3078/***************************************************************
3138 * TLan_EeSendByte 3079 * tlan_ee_send_byte
3139 * 3080 *
3140 * Returns: 3081 * Returns:
3141 * If the correct ack was received, 0, otherwise 1 3082 * If the correct ack was received, 0, otherwise 1
3142 * Parms: io_base The IO port base address for the 3083 * Parms: io_base The IO port base address for the
3143 * TLAN device with the EEPROM to 3084 * TLAN device with the EEPROM to
3144 * use. 3085 * use.
3145 * data The 8 bits of information to 3086 * data The 8 bits of information to
3146 * send to the EEPROM. 3087 * send to the EEPROM.
3147 * stop If TLAN_EEPROM_STOP is passed, a 3088 * stop If TLAN_EEPROM_STOP is passed, a
3148 * stop cycle is sent after the 3089 * stop cycle is sent after the
3149 * byte is sent after the ack is 3090 * byte is sent after the ack is
3150 * read. 3091 * read.
3151 * 3092 *
3152 * This function sends a byte on the serial EEPROM line, 3093 * This function sends a byte on the serial EEPROM line,
3153 * driving the clock to send each bit. The function then 3094 * driving the clock to send each bit. The function then
3154 * reverses transmission direction and reads an acknowledge 3095 * reverses transmission direction and reads an acknowledge
3155 * bit. 3096 * bit.
3156 * 3097 *
3157 **************************************************************/ 3098 **************************************************************/
3158 3099
3159static int TLan_EeSendByte( u16 io_base, u8 data, int stop ) 3100static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
3160{ 3101{
3161 int err; 3102 int err;
3162 u8 place; 3103 u8 place;
3163 u16 sio; 3104 u16 sio;
3164 3105
3165 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3106 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3166 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3107 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3167 3108
3168 /* Assume clock is low, tx is enabled; */ 3109 /* Assume clock is low, tx is enabled; */
3169 for ( place = 0x80; place != 0; place >>= 1 ) { 3110 for (place = 0x80; place != 0; place >>= 1) {
3170 if ( place & data ) 3111 if (place & data)
3171 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3112 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3172 else 3113 else
3173 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3114 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3174 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3115 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3175 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3116 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3176 } 3117 }
3177 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3118 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3178 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3119 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3179 err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio ); 3120 err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
3180 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3121 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3181 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3122 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3182 3123
3183 if ( ( ! err ) && stop ) { 3124 if ((!err) && stop) {
3184 /* STOP, raise data while clock is high */ 3125 /* STOP, raise data while clock is high */
3185 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3126 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3186 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3127 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3128 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3188 } 3129 }
3189 3130
3190 return err; 3131 return err;
3191 3132
3192} /* TLan_EeSendByte */ 3133}
3193 3134
3194 3135
3195 3136
3196 3137
3197 /*************************************************************** 3138/***************************************************************
3198 * TLan_EeReceiveByte 3139 * tlan_ee_receive_byte
3199 * 3140 *
3200 * Returns: 3141 * Returns:
3201 * Nothing 3142 * Nothing
3202 * Parms: 3143 * Parms:
3203 * io_base The IO port base address for the 3144 * io_base The IO port base address for the
3204 * TLAN device with the EEPROM to 3145 * TLAN device with the EEPROM to
3205 * use. 3146 * use.
3206 * data An address to a char to hold the 3147 * data An address to a char to hold the
3207 * data sent from the EEPROM. 3148 * data sent from the EEPROM.
3208 * stop If TLAN_EEPROM_STOP is passed, a 3149 * stop If TLAN_EEPROM_STOP is passed, a
3209 * stop cycle is sent after the 3150 * stop cycle is sent after the
3210 * byte is received, and no ack is 3151 * byte is received, and no ack is
3211 * sent. 3152 * sent.
3212 * 3153 *
3213 * This function receives 8 bits of data from the EEPROM 3154 * This function receives 8 bits of data from the EEPROM
3214	 * over the serial link. It then sends an ack bit, or no 3155	 * over the serial link. It then sends an ack bit, or no
3215 * ack and a stop bit. This function is used to retrieve 3156 * ack and a stop bit. This function is used to retrieve
3216 * data after the address of a byte in the EEPROM has been 3157 * data after the address of a byte in the EEPROM has been
3217 * sent. 3158 * sent.
3218 * 3159 *
3219 **************************************************************/ 3160 **************************************************************/
3220 3161
3221static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) 3162static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
3222{ 3163{
3223 u8 place; 3164 u8 place;
3224 u16 sio; 3165 u16 sio;
3225 3166
3226 outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); 3167 outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
3227 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; 3168 sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
3228 *data = 0; 3169 *data = 0;
3229 3170
3230 /* Assume clock is low, tx is enabled; */ 3171 /* Assume clock is low, tx is enabled; */
3231 TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); 3172 tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
3232 for ( place = 0x80; place; place >>= 1 ) { 3173 for (place = 0x80; place; place >>= 1) {
3233 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3174 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3234 if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) ) 3175 if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
3235 *data |= place; 3176 *data |= place;
3236 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3177 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3237 } 3178 }
3238 3179
3239 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3180 tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
3240 if ( ! stop ) { 3181 if (!stop) {
3241 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */ 3182 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
3242 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3183 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3243 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3184 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3244 } else { 3185 } else {
3245 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ 3186 tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
3246 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3187 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3247 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3188 tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
3248 /* STOP, raise data while clock is high */ 3189 /* STOP, raise data while clock is high */
3249 TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3190 tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
3250 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3191 tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
3251 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3192 tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
3252 } 3193 }
3253 3194
3254} /* TLan_EeReceiveByte */ 3195}
3255 3196
3256 3197
3257 3198
3258 3199
3259 /*************************************************************** 3200/***************************************************************
3260 * TLan_EeReadByte 3201 * tlan_ee_read_byte
3261 * 3202 *
3262 * Returns: 3203 * Returns:
3263 * No error = 0, else, the stage at which the error 3204 * No error = 0, else, the stage at which the error
3264 * occurred. 3205 * occurred.
3265 * Parms: 3206 * Parms:
3266 * io_base The IO port base address for the 3207 * io_base The IO port base address for the
3267 * TLAN device with the EEPROM to 3208 * TLAN device with the EEPROM to
3268 * use. 3209 * use.
3269 * ee_addr The address of the byte in the 3210 * ee_addr The address of the byte in the
3270 * EEPROM whose contents are to be 3211 * EEPROM whose contents are to be
3271 * retrieved. 3212 * retrieved.
3272 * data An address to a char to hold the 3213 * data An address to a char to hold the
3273 * data obtained from the EEPROM. 3214 * data obtained from the EEPROM.
3274 * 3215 *
3275	 * This function reads a byte of information from a byte 3216	 * This function reads a byte of information from a byte
3276 * cell in the EEPROM. 3217 * cell in the EEPROM.
3277 * 3218 *
3278 **************************************************************/ 3219 **************************************************************/
3279 3220
3280static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) 3221static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
3281{ 3222{
3282 int err; 3223 int err;
3283 TLanPrivateInfo *priv = netdev_priv(dev); 3224 struct tlan_priv *priv = netdev_priv(dev);
3284 unsigned long flags = 0; 3225 unsigned long flags = 0;
3285 int ret=0; 3226 int ret = 0;
3286 3227
3287 spin_lock_irqsave(&priv->lock, flags); 3228 spin_lock_irqsave(&priv->lock, flags);
3288 3229
3289 TLan_EeSendStart( dev->base_addr ); 3230 tlan_ee_send_start(dev->base_addr);
3290 err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); 3231 err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
3291 if (err) 3232 if (err) {
3292 { 3233 ret = 1;
3293 ret=1;
3294 goto fail; 3234 goto fail;
3295 } 3235 }
3296 err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); 3236 err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
3297 if (err) 3237 if (err) {
3298 { 3238 ret = 2;
3299 ret=2;
3300 goto fail; 3239 goto fail;
3301 } 3240 }
3302 TLan_EeSendStart( dev->base_addr ); 3241 tlan_ee_send_start(dev->base_addr);
3303 err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); 3242 err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
3304 if (err) 3243 if (err) {
3305 { 3244 ret = 3;
3306 ret=3;
3307 goto fail; 3245 goto fail;
3308 } 3246 }
3309 TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP ); 3247 tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
3310fail: 3248fail:
3311 spin_unlock_irqrestore(&priv->lock, flags); 3249 spin_unlock_irqrestore(&priv->lock, flags);
3312 3250
3313 return ret; 3251 return ret;
3314 3252
3315} /* TLan_EeReadByte */ 3253}
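
	tlan_ee_read_byte() above is the usual 24C02 random read: start, device write address 0xa0, byte address, repeated start, device read address 0xa1, one data byte, stop. A hypothetical wrapper that reads several consecutive cells one byte at a time (the name and length parameter are illustrative, not part of the driver):

	static int tlan_ee_read_block(struct net_device *dev, u8 ee_addr,
				      u8 *buf, int len)
	{
		int i, err;

		for (i = 0; i < len; i++) {
			err = tlan_ee_read_byte(dev, ee_addr + i, &buf[i]);
			if (err)
				return err;	/* 1, 2 or 3: the stage that failed */
		}

		return 0;
	}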
3316 3254
3317 3255
3318 3256
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced774e..5fc98a8e488 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
20 ********************************************************************/ 20 ********************************************************************/
21 21
22 22
23#include <asm/io.h> 23#include <linux/io.h>
24#include <asm/types.h> 24#include <linux/types.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26 26
27 27
@@ -40,8 +40,11 @@
40#define TLAN_IGNORE 0 40#define TLAN_IGNORE 0
41#define TLAN_RECORD 1 41#define TLAN_RECORD 1
42 42
43#define TLAN_DBG(lvl, format, args...) \ 43#define TLAN_DBG(lvl, format, args...) \
44 do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0) 44 do { \
45 if (debug&lvl) \
46 printk(KERN_DEBUG "TLAN: " format, ##args); \
47 } while (0)
45 48
46#define TLAN_DEBUG_GNRL 0x0001 49#define TLAN_DEBUG_GNRL 0x0001
47#define TLAN_DEBUG_TX 0x0002 50#define TLAN_DEBUG_TX 0x0002
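
	The reworked TLAN_DBG() above keeps its printf-style interface, so call sites in tlan.c are unchanged; for example:

	/* printed via printk(KERN_DEBUG "TLAN: " ...) only when the
	 * TLAN_DEBUG_GNRL bit is set in the module's debug mask */
	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);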
@@ -50,7 +53,8 @@
50#define TLAN_DEBUG_PROBE 0x0010 53#define TLAN_DEBUG_PROBE 0x0010
51 54
52#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ 55#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
53#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */ 56#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
57 at a time */
54 58
55 59
56 /***************************************************************** 60 /*****************************************************************
@@ -70,13 +74,13 @@
70#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 74#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
71#endif 75#endif
72 76
73typedef struct tlan_adapter_entry { 77struct tlan_adapter_entry {
74 u16 vendorId; 78 u16 vendor_id;
75 u16 deviceId; 79 u16 device_id;
76 char *deviceLabel; 80 char *device_label;
77 u32 flags; 81 u32 flags;
78 u16 addrOfs; 82 u16 addr_ofs;
79} TLanAdapterEntry; 83};
80 84
81#define TLAN_ADAPTER_NONE 0x00000000 85#define TLAN_ADAPTER_NONE 0x00000000
82#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 86#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
129#define TLAN_CSTAT_DP_PR 0x0100 133#define TLAN_CSTAT_DP_PR 0x0100
130 134
131 135
132typedef struct tlan_buffer_ref_tag { 136struct tlan_buffer {
133 u32 count; 137 u32 count;
134 u32 address; 138 u32 address;
135} TLanBufferRef; 139};
136 140
137 141
138typedef struct tlan_list_tag { 142struct tlan_list {
139 u32 forward; 143 u32 forward;
140 u16 cStat; 144 u16 c_stat;
141 u16 frameSize; 145 u16 frame_size;
142 TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST]; 146 struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
143} TLanList; 147};
144 148
145 149
146typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; 150typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
164 * 168 *
165 ****************************************************************/ 169 ****************************************************************/
166 170
167typedef struct tlan_private_tag { 171struct tlan_priv {
168 struct net_device *nextDevice; 172 struct net_device *next_device;
169 struct pci_dev *pciDev; 173 struct pci_dev *pci_dev;
170 struct net_device *dev; 174 struct net_device *dev;
171 void *dmaStorage; 175 void *dma_storage;
172 dma_addr_t dmaStorageDMA; 176 dma_addr_t dma_storage_dma;
173 unsigned int dmaSize; 177 unsigned int dma_size;
174 u8 *padBuffer; 178 u8 *pad_buffer;
175 TLanList *rxList; 179 struct tlan_list *rx_list;
176 dma_addr_t rxListDMA; 180 dma_addr_t rx_list_dma;
177 u8 *rxBuffer; 181 u8 *rx_buffer;
178 dma_addr_t rxBufferDMA; 182 dma_addr_t rx_buffer_dma;
179 u32 rxHead; 183 u32 rx_head;
180 u32 rxTail; 184 u32 rx_tail;
181 u32 rxEocCount; 185 u32 rx_eoc_count;
182 TLanList *txList; 186 struct tlan_list *tx_list;
183 dma_addr_t txListDMA; 187 dma_addr_t tx_list_dma;
184 u8 *txBuffer; 188 u8 *tx_buffer;
185 dma_addr_t txBufferDMA; 189 dma_addr_t tx_buffer_dma;
186 u32 txHead; 190 u32 tx_head;
187 u32 txInProgress; 191 u32 tx_in_progress;
188 u32 txTail; 192 u32 tx_tail;
189 u32 txBusyCount; 193 u32 tx_busy_count;
190 u32 phyOnline; 194 u32 phy_online;
191 u32 timerSetAt; 195 u32 timer_set_at;
192 u32 timerType; 196 u32 timer_type;
193 struct timer_list timer; 197 struct timer_list timer;
194 struct board *adapter; 198 struct board *adapter;
195 u32 adapterRev; 199 u32 adapter_rev;
196 u32 aui; 200 u32 aui;
197 u32 debug; 201 u32 debug;
198 u32 duplex; 202 u32 duplex;
199 u32 phy[2]; 203 u32 phy[2];
200 u32 phyNum; 204 u32 phy_num;
201 u32 speed; 205 u32 speed;
202 u8 tlanRev; 206 u8 tlan_rev;
203 u8 tlanFullDuplex; 207 u8 tlan_full_duplex;
204 spinlock_t lock; 208 spinlock_t lock;
205 u8 link; 209 u8 link;
206 u8 is_eisa; 210 u8 is_eisa;
207 struct work_struct tlan_tqueue; 211 struct work_struct tlan_tqueue;
208 u8 neg_be_verbose; 212 u8 neg_be_verbose;
209} TLanPrivateInfo; 213};
210 214
211 215
212 216
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
247 ****************************************************************/ 251 ****************************************************************/
248 252
249#define TLAN_HOST_CMD 0x00 253#define TLAN_HOST_CMD 0x00
250#define TLAN_HC_GO 0x80000000 254#define TLAN_HC_GO 0x80000000
251#define TLAN_HC_STOP 0x40000000 255#define TLAN_HC_STOP 0x40000000
252#define TLAN_HC_ACK 0x20000000 256#define TLAN_HC_ACK 0x20000000
253#define TLAN_HC_CS_MASK 0x1FE00000 257#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
283#define TLAN_NET_CMD_TRFRAM 0x02 287#define TLAN_NET_CMD_TRFRAM 0x02
284#define TLAN_NET_CMD_TXPACE 0x01 288#define TLAN_NET_CMD_TXPACE 0x01
285#define TLAN_NET_SIO 0x01 289#define TLAN_NET_SIO 0x01
286#define TLAN_NET_SIO_MINTEN 0x80 290#define TLAN_NET_SIO_MINTEN 0x80
287#define TLAN_NET_SIO_ECLOK 0x40 291#define TLAN_NET_SIO_ECLOK 0x40
288#define TLAN_NET_SIO_ETXEN 0x20 292#define TLAN_NET_SIO_ETXEN 0x20
289#define TLAN_NET_SIO_EDATA 0x10 293#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
304#define TLAN_NET_MASK_MASK4 0x10 308#define TLAN_NET_MASK_MASK4 0x10
305#define TLAN_NET_MASK_RSRVD 0x0F 309#define TLAN_NET_MASK_RSRVD 0x0F
306#define TLAN_NET_CONFIG 0x04 310#define TLAN_NET_CONFIG 0x04
307#define TLAN_NET_CFG_RCLK 0x8000 311#define TLAN_NET_CFG_RCLK 0x8000
308#define TLAN_NET_CFG_TCLK 0x4000 312#define TLAN_NET_CFG_TCLK 0x4000
309#define TLAN_NET_CFG_BIT 0x2000 313#define TLAN_NET_CFG_BIT 0x2000
310#define TLAN_NET_CFG_RXCRC 0x1000 314#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
372/* Generic MII/PHY Registers */ 376/* Generic MII/PHY Registers */
373 377
374#define MII_GEN_CTL 0x00 378#define MII_GEN_CTL 0x00
375#define MII_GC_RESET 0x8000 379#define MII_GC_RESET 0x8000
376#define MII_GC_LOOPBK 0x4000 380#define MII_GC_LOOPBK 0x4000
377#define MII_GC_SPEEDSEL 0x2000 381#define MII_GC_SPEEDSEL 0x2000
378#define MII_GC_AUTOENB 0x1000 382#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
397#define MII_GS_EXTCAP 0x0001 401#define MII_GS_EXTCAP 0x0001
398#define MII_GEN_ID_HI 0x02 402#define MII_GEN_ID_HI 0x02
399#define MII_GEN_ID_LO 0x03 403#define MII_GEN_ID_LO 0x03
400#define MII_GIL_OUI 0xFC00 404#define MII_GIL_OUI 0xFC00
401#define MII_GIL_MODEL 0x03F0 405#define MII_GIL_MODEL 0x03F0
402#define MII_GIL_REVISION 0x000F 406#define MII_GIL_REVISION 0x000F
403#define MII_AN_ADV 0x04 407#define MII_AN_ADV 0x04
404#define MII_AN_LPA 0x05 408#define MII_AN_LPA 0x05
405#define MII_AN_EXP 0x06 409#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
408 412
409#define TLAN_TLPHY_ID 0x10 413#define TLAN_TLPHY_ID 0x10
410#define TLAN_TLPHY_CTL 0x11 414#define TLAN_TLPHY_CTL 0x11
411#define TLAN_TC_IGLINK 0x8000 415#define TLAN_TC_IGLINK 0x8000
412#define TLAN_TC_SWAPOL 0x4000 416#define TLAN_TC_SWAPOL 0x4000
413#define TLAN_TC_AUISEL 0x2000 417#define TLAN_TC_AUISEL 0x2000
414#define TLAN_TC_SQEEN 0x1000 418#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
435#define LEVEL1_ID1 0x7810 439#define LEVEL1_ID1 0x7810
436#define LEVEL1_ID2 0x0000 440#define LEVEL1_ID2 0x0000
437 441
438#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0 442#define CIRC_INC(a, b) if (++a >= b) a = 0
439 443
440/* Routines to access internal registers. */ 444/* Routines to access internal registers. */
441 445
442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) 446static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
443{ 447{
444 outw(internal_addr, base_addr + TLAN_DIO_ADR); 448 outw(internal_addr, base_addr + TLAN_DIO_ADR);
445 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); 449 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
446 450
447} /* TLan_DioRead8 */ 451}
448 452
449 453
450 454
451 455
452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) 456static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
453{ 457{
454 outw(internal_addr, base_addr + TLAN_DIO_ADR); 458 outw(internal_addr, base_addr + TLAN_DIO_ADR);
455 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); 459 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
456 460
457} /* TLan_DioRead16 */ 461}
458 462
459 463
460 464
461 465
462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) 466static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
463{ 467{
464 outw(internal_addr, base_addr + TLAN_DIO_ADR); 468 outw(internal_addr, base_addr + TLAN_DIO_ADR);
465 return inl(base_addr + TLAN_DIO_DATA); 469 return inl(base_addr + TLAN_DIO_DATA);
466 470
467} /* TLan_DioRead32 */ 471}
468 472
469 473
470 474
471 475
472static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data) 476static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
473{ 477{
474 outw(internal_addr, base_addr + TLAN_DIO_ADR); 478 outw(internal_addr, base_addr + TLAN_DIO_ADR);
475 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); 479 outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
479 483
480 484
481 485
482static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data) 486static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
483{ 487{
484 outw(internal_addr, base_addr + TLAN_DIO_ADR); 488 outw(internal_addr, base_addr + TLAN_DIO_ADR);
485 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 489 outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
489 493
490 494
491 495
492static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data) 496static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
493{ 497{
494 outw(internal_addr, base_addr + TLAN_DIO_ADR); 498 outw(internal_addr, base_addr + TLAN_DIO_ADR);
495 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); 499 outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
496 500
497} 501}
498 502
499#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port) 503#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
500#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit)) 504#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
501#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port) 505#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
502 506
503/* 507/*
504 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those 508 * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
506 * 510 *
507 * The original code was: 511 * The original code was:
508 * 512 *
509 * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); } 513 * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); }
510 * 514 *
511 * #define XOR8( a, b, c, d, e, f, g, h ) \ 515 * #define XOR8(a, b, c, d, e, f, g, h) \
512 * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) 516 * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
513 * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) 517 * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
514 * 518 *
515 * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), 519 * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
516 * DA(a,30), DA(a,36), DA(a,42) ); 520 * DA(a,30), DA(a,36), DA(a,42));
517 * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), 521 * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
518 * DA(a,31), DA(a,37), DA(a,43) ) << 1; 522 * DA(a,31), DA(a,37), DA(a,43)) << 1;
519 * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), 523 * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
520 * DA(a,32), DA(a,38), DA(a,44) ) << 2; 524 * DA(a,32), DA(a,38), DA(a,44)) << 2;
521 * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), 525 * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
522 * DA(a,33), DA(a,39), DA(a,45) ) << 3; 526 * DA(a,33), DA(a,39), DA(a,45)) << 3;
523 * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), 527 * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
524 * DA(a,34), DA(a,40), DA(a,46) ) << 4; 528 * DA(a,34), DA(a,40), DA(a,46)) << 4;
525 * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), 529 * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
526 * DA(a,35), DA(a,41), DA(a,47) ) << 5; 530 * DA(a,35), DA(a,41), DA(a,47)) << 5;
527 * 531 *
528 */ 532 */
529static inline u32 TLan_HashFunc( const u8 *a ) 533static inline u32 tlan_hash_func(const u8 *a)
530{ 534{
531 u8 hash; 535 u8 hash;
532 536
533 hash = (a[0]^a[3]); /* & 077 */ 537 hash = (a[0]^a[3]); /* & 077 */
534 hash ^= ((a[0]^a[3])>>6); /* & 003 */ 538 hash ^= ((a[0]^a[3])>>6); /* & 003 */
535 hash ^= ((a[1]^a[4])<<2); /* & 074 */ 539 hash ^= ((a[1]^a[4])<<2); /* & 074 */
536 hash ^= ((a[1]^a[4])>>4); /* & 017 */ 540 hash ^= ((a[1]^a[4])>>4); /* & 017 */
537 hash ^= ((a[2]^a[5])<<4); /* & 060 */ 541 hash ^= ((a[2]^a[5])<<4); /* & 060 */
538 hash ^= ((a[2]^a[5])>>2); /* & 077 */ 542 hash ^= ((a[2]^a[5])>>2); /* & 077 */
539 543
540 return hash & 077; 544 return hash & 077;
541} 545}
542#endif 546#endif
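
	The simplified tlan_hash_func() above folds the six address bytes into a 6-bit multicast hash bucket. A standalone, user-space check of the same arithmetic on a sample address (the address is only an example):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t hash6(const uint8_t *a)
	{
		uint8_t hash;

		hash  = a[0] ^ a[3];		/* & 077 */
		hash ^= (a[0] ^ a[3]) >> 6;	/* & 003 */
		hash ^= (a[1] ^ a[4]) << 2;	/* & 074 */
		hash ^= (a[1] ^ a[4]) >> 4;	/* & 017 */
		hash ^= (a[2] ^ a[5]) << 4;	/* & 060 */
		hash ^= (a[2] ^ a[5]) >> 2;	/* & 077 */

		return hash & 077;
	}

	int main(void)
	{
		const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

		printf("bucket = %lu\n", (unsigned long)hash6(mc));
		return 0;
	}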
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd50a0d..f5e9ac00a07 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -34,6 +34,8 @@
34 * Modifications for 2.3.99-pre5 kernel. 34 * Modifications for 2.3.99-pre5 kernel.
35 */ 35 */
36 36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
37#define DRV_NAME "tun" 39#define DRV_NAME "tun"
38#define DRV_VERSION "1.6" 40#define DRV_VERSION "1.6"
39#define DRV_DESCRIPTION "Universal TUN/TAP device driver" 41#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
@@ -76,11 +78,27 @@
76#ifdef TUN_DEBUG 78#ifdef TUN_DEBUG
77static int debug; 79static int debug;
78 80
79#define DBG if(tun->debug)printk 81#define tun_debug(level, tun, fmt, args...) \
80#define DBG1 if(debug==2)printk 82do { \
83 if (tun->debug) \
84 netdev_printk(level, tun->dev, fmt, ##args); \
85} while (0)
86#define DBG1(level, fmt, args...) \
87do { \
88 if (debug == 2) \
89 printk(level fmt, ##args); \
90} while (0)
81#else 91#else
82#define DBG( a... ) 92#define tun_debug(level, tun, fmt, args...) \
83#define DBG1( a... ) 93do { \
94 if (0) \
95 netdev_printk(level, tun->dev, fmt, ##args); \
96} while (0)
97#define DBG1(level, fmt, args...) \
98do { \
99 if (0) \
100 printk(level fmt, ##args); \
101} while (0)
84#endif 102#endif
85 103
86#define FLT_EXACT_COUNT 8 104#define FLT_EXACT_COUNT 8
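The new tun_debug()/DBG1() macros wrap their bodies in do { ... } while (0) so that a conditional debug call followed by an else still parses as intended. A minimal user-space analog of the pattern (hypothetical names, GNU variadic-macro syntax as used in the patch) is:

#include <stdio.h>

static int debug = 1;

/* Same shape as the patch's tun_debug()/DBG1(): wrapping the body in
 * do { ... } while (0) turns the macro into a single statement, so the
 * if/else in main() below still associates the way it reads. */
#define dbg_print(fmt, args...)                 \
do {                                            \
        if (debug)                              \
                printf(fmt, ##args);            \
} while (0)

int main(void)
{
        int len = 42;

        if (len > 0)
                dbg_print("xmit %d bytes\n", len);
        else
                printf("nothing to send\n");

        return 0;
}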
@@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun)
205 tun_detach(tfile->tun); 223 tun_detach(tfile->tun);
206} 224}
207 225
208/* TAP filterting */ 226/* TAP filtering */
209static void addr_hash_set(u32 *mask, const u8 *addr) 227static void addr_hash_set(u32 *mask, const u8 *addr)
210{ 228{
211 int n = ether_crc(ETH_ALEN, addr) >> 26; 229 int n = ether_crc(ETH_ALEN, addr) >> 26;
@@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
360{ 378{
361 struct tun_struct *tun = netdev_priv(dev); 379 struct tun_struct *tun = netdev_priv(dev);
362 380
363 DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); 381 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
364 382
365 /* Drop packet if interface is not attached */ 383 /* Drop packet if interface is not attached */
366 if (!tun->tfile) 384 if (!tun->tfile)
@@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
499 517
500 sk = tun->socket.sk; 518 sk = tun->socket.sk;
501 519
502 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 520 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
503 521
504 poll_wait(file, &tun->wq.wait, wait); 522 poll_wait(file, &tun->wq.wait, wait);
505 523
@@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
690 if (!tun) 708 if (!tun)
691 return -EBADFD; 709 return -EBADFD;
692 710
693 DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); 711 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
694 712
695 result = tun_get_user(tun, iv, iov_length(iv, count), 713 result = tun_get_user(tun, iv, iov_length(iv, count),
696 file->f_flags & O_NONBLOCK); 714 file->f_flags & O_NONBLOCK);
@@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
739 else if (sinfo->gso_type & SKB_GSO_UDP) 757 else if (sinfo->gso_type & SKB_GSO_UDP)
740 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; 758 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
741 else { 759 else {
742 printk(KERN_ERR "tun: unexpected GSO type: " 760 pr_err("unexpected GSO type: "
743 "0x%x, gso_size %d, hdr_len %d\n", 761 "0x%x, gso_size %d, hdr_len %d\n",
744 sinfo->gso_type, gso.gso_size, 762 sinfo->gso_type, gso.gso_size,
745 gso.hdr_len); 763 gso.hdr_len);
@@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
786 struct sk_buff *skb; 804 struct sk_buff *skb;
787 ssize_t ret = 0; 805 ssize_t ret = 0;
788 806
789 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 807 tun_debug(KERN_INFO, tun, "tun_chr_read\n");
790 808
791 add_wait_queue(&tun->wq.wait, &wait); 809 add_wait_queue(&tun->wq.wait, &wait);
792 while (len) { 810 while (len) {
@@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1083 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1101 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1084 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1102 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1085 device_create_file(&tun->dev->dev, &dev_attr_group)) 1103 device_create_file(&tun->dev->dev, &dev_attr_group))
1086 printk(KERN_ERR "Failed to create tun sysfs files\n"); 1104 pr_err("Failed to create tun sysfs files\n");
1087 1105
1088 sk->sk_destruct = tun_sock_destruct; 1106 sk->sk_destruct = tun_sock_destruct;
1089 1107
@@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1092 goto failed; 1110 goto failed;
1093 } 1111 }
1094 1112
1095 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); 1113 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1096 1114
1097 if (ifr->ifr_flags & IFF_NO_PI) 1115 if (ifr->ifr_flags & IFF_NO_PI)
1098 tun->flags |= TUN_NO_PI; 1116 tun->flags |= TUN_NO_PI;
@@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1129static int tun_get_iff(struct net *net, struct tun_struct *tun, 1147static int tun_get_iff(struct net *net, struct tun_struct *tun,
1130 struct ifreq *ifr) 1148 struct ifreq *ifr)
1131{ 1149{
1132 DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); 1150 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1133 1151
1134 strcpy(ifr->ifr_name, tun->dev->name); 1152 strcpy(ifr->ifr_name, tun->dev->name);
1135 1153
@@ -1142,7 +1160,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
1142 * privs required. */ 1160 * privs required. */
1143static int set_offload(struct net_device *dev, unsigned long arg) 1161static int set_offload(struct net_device *dev, unsigned long arg)
1144{ 1162{
1145 unsigned int old_features, features; 1163 u32 old_features, features;
1146 1164
1147 old_features = dev->features; 1165 old_features = dev->features;
1148 /* Unset features, set them as we chew on the arg. */ 1166 /* Unset features, set them as we chew on the arg. */
@@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1229 if (!tun) 1247 if (!tun)
1230 goto unlock; 1248 goto unlock;
1231 1249
1232 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 1250 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
1233 1251
1234 ret = 0; 1252 ret = 0;
1235 switch (cmd) { 1253 switch (cmd) {
@@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1249 else 1267 else
1250 tun->flags &= ~TUN_NOCHECKSUM; 1268 tun->flags &= ~TUN_NOCHECKSUM;
1251 1269
1252 DBG(KERN_INFO "%s: checksum %s\n", 1270 tun_debug(KERN_INFO, tun, "checksum %s\n",
1253 tun->dev->name, arg ? "disabled" : "enabled"); 1271 arg ? "disabled" : "enabled");
1254 break; 1272 break;
1255 1273
1256 case TUNSETPERSIST: 1274 case TUNSETPERSIST:
@@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1260 else 1278 else
1261 tun->flags &= ~TUN_PERSIST; 1279 tun->flags &= ~TUN_PERSIST;
1262 1280
1263 DBG(KERN_INFO "%s: persist %s\n", 1281 tun_debug(KERN_INFO, tun, "persist %s\n",
1264 tun->dev->name, arg ? "enabled" : "disabled"); 1282 arg ? "enabled" : "disabled");
1265 break; 1283 break;
1266 1284
1267 case TUNSETOWNER: 1285 case TUNSETOWNER:
1268 /* Set owner of the device */ 1286 /* Set owner of the device */
1269 tun->owner = (uid_t) arg; 1287 tun->owner = (uid_t) arg;
1270 1288
1271 DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); 1289 tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
1272 break; 1290 break;
1273 1291
1274 case TUNSETGROUP: 1292 case TUNSETGROUP:
1275 /* Set group of the device */ 1293 /* Set group of the device */
1276 tun->group= (gid_t) arg; 1294 tun->group= (gid_t) arg;
1277 1295
1278 DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); 1296 tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
1279 break; 1297 break;
1280 1298
1281 case TUNSETLINK: 1299 case TUNSETLINK:
1282 /* Only allow setting the type when the interface is down */ 1300 /* Only allow setting the type when the interface is down */
1283 if (tun->dev->flags & IFF_UP) { 1301 if (tun->dev->flags & IFF_UP) {
1284 DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", 1302 tun_debug(KERN_INFO, tun,
1285 tun->dev->name); 1303 "Linktype set failed because interface is up\n");
1286 ret = -EBUSY; 1304 ret = -EBUSY;
1287 } else { 1305 } else {
1288 tun->dev->type = (int) arg; 1306 tun->dev->type = (int) arg;
1289 DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); 1307 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1308 tun->dev->type);
1290 ret = 0; 1309 ret = 0;
1291 } 1310 }
1292 break; 1311 break;
@@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1318 1337
1319 case SIOCSIFHWADDR: 1338 case SIOCSIFHWADDR:
1320 /* Set hw address */ 1339 /* Set hw address */
1321 DBG(KERN_DEBUG "%s: set hw address: %pM\n", 1340 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1322 tun->dev->name, ifr.ifr_hwaddr.sa_data); 1341 ifr.ifr_hwaddr.sa_data);
1323 1342
1324 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1343 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1325 break; 1344 break;
@@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1433 if (!tun) 1452 if (!tun)
1434 return -EBADFD; 1453 return -EBADFD;
1435 1454
1436 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); 1455 tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
1437 1456
1438 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) 1457 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1439 goto out; 1458 goto out;
@@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1455{ 1474{
1456 struct tun_file *tfile; 1475 struct tun_file *tfile;
1457 1476
1458 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1477 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
1459 1478
1460 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 1479 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1461 if (!tfile) 1480 if (!tfile)
@@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
1476 if (tun) { 1495 if (tun) {
1477 struct net_device *dev = tun->dev; 1496 struct net_device *dev = tun->dev;
1478 1497
1479 DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); 1498 tun_debug(KERN_INFO, tun, "tun_chr_close\n");
1480 1499
1481 __tun_detach(tun); 1500 __tun_detach(tun);
1482 1501
@@ -1607,18 +1626,18 @@ static int __init tun_init(void)
1607{ 1626{
1608 int ret = 0; 1627 int ret = 0;
1609 1628
1610 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 1629 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1611 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); 1630 pr_info("%s\n", DRV_COPYRIGHT);
1612 1631
1613 ret = rtnl_link_register(&tun_link_ops); 1632 ret = rtnl_link_register(&tun_link_ops);
1614 if (ret) { 1633 if (ret) {
1615 printk(KERN_ERR "tun: Can't register link_ops\n"); 1634 pr_err("Can't register link_ops\n");
1616 goto err_linkops; 1635 goto err_linkops;
1617 } 1636 }
1618 1637
1619 ret = misc_register(&tun_miscdev); 1638 ret = misc_register(&tun_miscdev);
1620 if (ret) { 1639 if (ret) {
1621 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); 1640 pr_err("Can't register misc device %d\n", TUN_MINOR);
1622 goto err_misc; 1641 goto err_misc;
1623 } 1642 }
1624 return 0; 1643 return 0;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6a15e..7fa5ec2de94 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
123#include <linux/in6.h> 123#include <linux/in6.h>
124#include <linux/dma-mapping.h> 124#include <linux/dma-mapping.h>
125#include <linux/firmware.h> 125#include <linux/firmware.h>
126#include <generated/utsrelease.h>
127 126
128#include "typhoon.h" 127#include "typhoon.h"
129 128
130MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); 129MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
131MODULE_VERSION(UTS_RELEASE); 130MODULE_VERSION("1.0");
132MODULE_LICENSE("GPL"); 131MODULE_LICENSE("GPL");
133MODULE_FIRMWARE(FIRMWARE_NAME); 132MODULE_FIRMWARE(FIRMWARE_NAME);
134MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); 133MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa71c3f..105d7f0630c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
403 if (tb[IFLA_ADDRESS] == NULL) 403 if (tb[IFLA_ADDRESS] == NULL)
404 random_ether_addr(dev->dev_addr); 404 random_ether_addr(dev->dev_addr);
405 405
406 if (tb[IFLA_IFNAME])
407 nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
408 else
409 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
410
411 if (strchr(dev->name, '%')) {
412 err = dev_alloc_name(dev, dev->name);
413 if (err < 0)
414 goto err_alloc_name;
415 }
416
417 err = register_netdevice(dev); 406 err = register_netdevice(dev);
418 if (err < 0) 407 if (err < 0)
419 goto err_register_dev; 408 goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
433 422
434err_register_dev: 423err_register_dev:
435 /* nothing to do */ 424 /* nothing to do */
436err_alloc_name:
437err_configure_peer: 425err_configure_peer:
438 unregister_netdevice(peer); 426 unregister_netdevice(peer);
439 return err; 427 return err;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac704fdd..0d6fec6b7d9 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2923static int velocity_set_wol(struct velocity_info *vptr) 2923static int velocity_set_wol(struct velocity_info *vptr)
2924{ 2924{
2925 struct mac_regs __iomem *regs = vptr->mac_regs; 2925 struct mac_regs __iomem *regs = vptr->mac_regs;
2926 enum speed_opt spd_dpx = vptr->options.spd_dpx;
2926 static u8 buf[256]; 2927 static u8 buf[256];
2927 int i; 2928 int i;
2928 2929
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
2968 2969
2969 writew(0x0FFF, &regs->WOLSRClr); 2970 writew(0x0FFF, &regs->WOLSRClr);
2970 2971
2972 if (spd_dpx == SPD_DPX_1000_FULL)
2973 goto mac_done;
2974
2975 if (spd_dpx != SPD_DPX_AUTO)
2976 goto advertise_done;
2977
2971 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { 2978 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2972 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 2979 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2973 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); 2980 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2978 if (vptr->mii_status & VELOCITY_SPEED_1000) 2985 if (vptr->mii_status & VELOCITY_SPEED_1000)
2979 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); 2986 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2980 2987
2988advertise_done:
2981 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); 2989 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2982 2990
2983 { 2991 {
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
2987 writeb(GCR, &regs->CHIPGCR); 2995 writeb(GCR, &regs->CHIPGCR);
2988 } 2996 }
2989 2997
2998mac_done:
2990 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR); 2999 BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2991 /* Turn on SWPTAG just before entering power mode */ 3000 /* Turn on SWPTAG just before entering power mode */
2992 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW); 3001 BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b9ff6..d7227539484 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
361#define MAC_REG_CHIPGSR 0x9C 361#define MAC_REG_CHIPGSR 0x9C
362#define MAC_REG_TESTCFG 0x9D 362#define MAC_REG_TESTCFG 0x9D
363#define MAC_REG_DEBUG 0x9E 363#define MAC_REG_DEBUG 0x9E
364#define MAC_REG_CHIPGCR 0x9F 364#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
365#define MAC_REG_WOLCR0_SET 0xA0 365#define MAC_REG_WOLCR0_SET 0xA0
366#define MAC_REG_WOLCR1_SET 0xA1 366#define MAC_REG_WOLCR1_SET 0xA1
367#define MAC_REG_PWCFG_SET 0xA2 367#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
848 * Bits in CHIPGCR register 848 * Bits in CHIPGCR register
849 */ 849 */
850 850
851#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */ 851#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
852#define CHIPGCR_FCFDX 0x40 852#define CHIPGCR_FCFDX 0x40 /* force full duplex */
853#define CHIPGCR_FCRESV 0x20 853#define CHIPGCR_FCRESV 0x20
854#define CHIPGCR_FCMODE 0x10 854#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
855#define CHIPGCR_LPSOPT 0x08 855#define CHIPGCR_LPSOPT 0x08
856#define CHIPGCR_TM1US 0x04 856#define CHIPGCR_TM1US 0x04
857#define CHIPGCR_TM0US 0x02 857#define CHIPGCR_TM0US 0x02
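The added CHIPGCR comments note that the FCGMII and FCFDX force bits only take effect while FCMODE (MAC forced mode) is set. A hedged sketch of how a caller might force GMII full-duplex operation, reusing the driver's existing BYTE_REG_BITS_ON() accessor (the helper name itself is hypothetical, not from the patch):

/* Hypothetical helper: force GMII, full-duplex operation.  The FCGMII and
 * FCFDX bits are only honoured while CHIPGCR_FCMODE (MAC forced mode) is
 * also set, per the comments above. */
static void velocity_force_gmii_fdx(struct mac_regs __iomem *regs)
{
        BYTE_REG_BITS_ON(CHIPGCR_FCMODE | CHIPGCR_FCGMII | CHIPGCR_FCFDX,
                         &regs->CHIPGCR);
}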
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 228d4f7a58a..e74e4b42592 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
387 data1 = steer_ctrl = 0; 387 data1 = steer_ctrl = 0;
388 388
389 status = vxge_hw_vpath_fw_api(vpath, 389 status = vxge_hw_vpath_fw_api(vpath,
390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 VXGE_HW_FW_API_GET_EPROM_REV, 390 VXGE_HW_FW_API_GET_EPROM_REV,
391 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
392 0, &data0, &data1, &steer_ctrl); 392 0, &data0, &data1, &steer_ctrl);
393 if (status != VXGE_HW_OK) 393 if (status != VXGE_HW_OK)
394 break; 394 break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2868 ring->rxd_init = attr->rxd_init; 2868 ring->rxd_init = attr->rxd_init;
2869 ring->rxd_term = attr->rxd_term; 2869 ring->rxd_term = attr->rxd_term;
2870 ring->buffer_mode = config->buffer_mode; 2870 ring->buffer_mode = config->buffer_mode;
2871 ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
2872 ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
2871 ring->rxds_limit = config->rxds_limit; 2873 ring->rxds_limit = config->rxds_limit;
2872 2874
2873 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); 2875 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
3511 3513
3512 /* apply "interrupts per txdl" attribute */ 3514 /* apply "interrupts per txdl" attribute */
3513 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; 3515 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
3516 fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
3517 fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
3514 3518
3515 if (fifo->config->intr) 3519 if (fifo->config->intr)
3516 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; 3520 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4377 } 4381 }
4378 4382
4379 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); 4383 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4384 vpath->tim_tti_cfg1_saved = val64;
4385
4380 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); 4386 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
4381 4387
4382 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { 4388 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4433 } 4439 }
4434 4440
4435 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); 4441 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
4442 vpath->tim_tti_cfg3_saved = val64;
4436 } 4443 }
4437 4444
4438 if (config->ring.enable == VXGE_HW_RING_ENABLE) { 4445 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4481 } 4488 }
4482 4489
4483 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); 4490 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4491 vpath->tim_rti_cfg1_saved = val64;
4492
4484 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); 4493 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4485 4494
4486 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { 4495 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4537 } 4546 }
4538 4547
4539 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); 4548 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4549 vpath->tim_rti_cfg3_saved = val64;
4540 } 4550 }
4541 4551
4542 val64 = 0; 4552 val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4555 return status; 4565 return status;
4556} 4566}
4557 4567
4558void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4559{
4560 struct __vxge_hw_virtualpath *vpath;
4561 struct vxge_hw_vpath_reg __iomem *vp_reg;
4562 struct vxge_hw_vp_config *config;
4563 u64 val64;
4564
4565 vpath = &hldev->virtual_paths[vp_id];
4566 vp_reg = vpath->vp_reg;
4567 config = vpath->vp_config;
4568
4569 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
4570 config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4571 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4572 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4573 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4574 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4575 }
4576}
4577
4578/* 4568/*
4579 * __vxge_hw_vpath_initialize 4569 * __vxge_hw_vpath_initialize
4580 * This routine is the final phase of init which initializes the 4570 * This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e288d16..3c53aa732c9 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
682 u32 vsport_number; 682 u32 vsport_number;
683 u32 max_kdfc_db; 683 u32 max_kdfc_db;
684 u32 max_nofl_db; 684 u32 max_nofl_db;
685 u64 tim_tti_cfg1_saved;
686 u64 tim_tti_cfg3_saved;
687 u64 tim_rti_cfg1_saved;
688 u64 tim_rti_cfg3_saved;
685 689
686 struct __vxge_hw_ring *____cacheline_aligned ringh; 690 struct __vxge_hw_ring *____cacheline_aligned ringh;
687 struct __vxge_hw_fifo *____cacheline_aligned fifoh; 691 struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
921 u32 doorbell_cnt; 925 u32 doorbell_cnt;
922 u32 total_db_cnt; 926 u32 total_db_cnt;
923 u64 rxds_limit; 927 u64 rxds_limit;
928 u32 rtimer;
929 u64 tim_rti_cfg1_saved;
930 u64 tim_rti_cfg3_saved;
924 931
925 enum vxge_hw_status (*callback)( 932 enum vxge_hw_status (*callback)(
926 struct __vxge_hw_ring *ringh, 933 struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
1000 u32 per_txdl_space; 1007 u32 per_txdl_space;
1001 u32 vp_id; 1008 u32 vp_id;
1002 u32 tx_intr_num; 1009 u32 tx_intr_num;
1010 u32 rtimer;
1011 u64 tim_tti_cfg1_saved;
1012 u64 tim_tti_cfg3_saved;
1003 1013
1004 enum vxge_hw_status (*callback)( 1014 enum vxge_hw_status (*callback)(
1005 struct __vxge_hw_fifo *fifo_handle, 1015 struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a6512c68..395423aeec0 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
371 struct vxge_hw_ring_rxd_info ext_info; 371 struct vxge_hw_ring_rxd_info ext_info;
372 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 372 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
373 ring->ndev->name, __func__, __LINE__); 373 ring->ndev->name, __func__, __LINE__);
374 ring->pkts_processed = 0;
375
376 vxge_hw_ring_replenish(ringh);
377 374
378 do { 375 do {
379 prefetch((char *)dtr + L1_CACHE_BYTES); 376 prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1588 return ret; 1585 return ret;
1589} 1586}
1590 1587
1588/* Configure CI */
1589static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1590{
1591 int i = 0;
1592
1593 /* Enable CI for RTI */
1594 if (vdev->config.intr_type == MSI_X) {
1595 for (i = 0; i < vdev->no_of_vpath; i++) {
1596 struct __vxge_hw_ring *hw_ring;
1597
1598 hw_ring = vdev->vpaths[i].ring.handle;
1599 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1600 }
1601 }
1602
1603 /* Enable CI for TTI */
1604 for (i = 0; i < vdev->no_of_vpath; i++) {
1605 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1606 vxge_hw_vpath_tti_ci_set(hw_fifo);
1607 /*
 1608 * For INTA (with or without NAPI), set CI on only one
 1609 * vpath (there is only one free-running timer).
1610 */
1611 if ((vdev->config.intr_type == INTA) && (i == 0))
1612 break;
1613 }
1614
1615 return;
1616}
1617
1591static int do_vxge_reset(struct vxgedev *vdev, int event) 1618static int do_vxge_reset(struct vxgedev *vdev, int event)
1592{ 1619{
1593 enum vxge_hw_status status; 1620 enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1753 netif_tx_wake_all_queues(vdev->ndev); 1780 netif_tx_wake_all_queues(vdev->ndev);
1754 } 1781 }
1755 1782
1783 /* configure CI */
1784 vxge_config_ci_for_tti_rti(vdev);
1785
1756out: 1786out:
1757 vxge_debug_entryexit(VXGE_TRACE, 1787 vxge_debug_entryexit(VXGE_TRACE,
1758 "%s:%d Exiting...", __func__, __LINE__); 1788 "%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
1793 */ 1823 */
1794static int vxge_poll_msix(struct napi_struct *napi, int budget) 1824static int vxge_poll_msix(struct napi_struct *napi, int budget)
1795{ 1825{
1796 struct vxge_ring *ring = 1826 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1797 container_of(napi, struct vxge_ring, napi); 1827 int pkts_processed;
1798 int budget_org = budget; 1828 int budget_org = budget;
1799 ring->budget = budget;
1800 1829
1830 ring->budget = budget;
1831 ring->pkts_processed = 0;
1801 vxge_hw_vpath_poll_rx(ring->handle); 1832 vxge_hw_vpath_poll_rx(ring->handle);
1833 pkts_processed = ring->pkts_processed;
1802 1834
1803 if (ring->pkts_processed < budget_org) { 1835 if (ring->pkts_processed < budget_org) {
1804 napi_complete(napi); 1836 napi_complete(napi);
1837
1805 /* Re enable the Rx interrupts for the vpath */ 1838 /* Re enable the Rx interrupts for the vpath */
1806 vxge_hw_channel_msix_unmask( 1839 vxge_hw_channel_msix_unmask(
1807 (struct __vxge_hw_channel *)ring->handle, 1840 (struct __vxge_hw_channel *)ring->handle,
1808 ring->rx_vector_no); 1841 ring->rx_vector_no);
1842 mmiowb();
1809 } 1843 }
1810 1844
1811 return ring->pkts_processed; 1845 /* Return the local copy: after re-enabling the MSI-X vector above,
 1846 * the interrupt may fire immediately and preempt this NAPI thread,
 1847 * which could modify ring->pkts_processed. */
1848 return pkts_processed;
1812} 1849}
1813 1850
1814static int vxge_poll_inta(struct napi_struct *napi, int budget) 1851static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1824 for (i = 0; i < vdev->no_of_vpath; i++) { 1861 for (i = 0; i < vdev->no_of_vpath; i++) {
1825 ring = &vdev->vpaths[i].ring; 1862 ring = &vdev->vpaths[i].ring;
1826 ring->budget = budget; 1863 ring->budget = budget;
1864 ring->pkts_processed = 0;
1827 vxge_hw_vpath_poll_rx(ring->handle); 1865 vxge_hw_vpath_poll_rx(ring->handle);
1828 pkts_processed += ring->pkts_processed; 1866 pkts_processed += ring->pkts_processed;
1829 budget -= ring->pkts_processed; 1867 budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2054 netdev_get_tx_queue(vdev->ndev, 0); 2092 netdev_get_tx_queue(vdev->ndev, 0);
2055 vpath->fifo.indicate_max_pkts = 2093 vpath->fifo.indicate_max_pkts =
2056 vdev->config.fifo_indicate_max_pkts; 2094 vdev->config.fifo_indicate_max_pkts;
2095 vpath->fifo.tx_vector_no = 0;
2057 vpath->ring.rx_vector_no = 0; 2096 vpath->ring.rx_vector_no = 0;
2058 vpath->ring.rx_csum = vdev->rx_csum; 2097 vpath->ring.rx_csum = vdev->rx_csum;
2059 vpath->ring.rx_hwts = vdev->rx_hwts; 2098 vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2079 return VXGE_HW_OK; 2118 return VXGE_HW_OK;
2080} 2119}
2081 2120
2121/**
2122 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2123 * if the interrupts are not within a range
2124 * @fifo: pointer to transmit fifo structure
2125 * Description: The function changes boundary timer and restriction timer
2126 * value depends on the traffic
2127 * Return Value: None
2128 */
2129static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2130{
2131 fifo->interrupt_count++;
2132 if (jiffies > fifo->jiffies + HZ / 100) {
2133 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2134
2135 fifo->jiffies = jiffies;
2136 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2137 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2138 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2139 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2140 } else if (hw_fifo->rtimer != 0) {
2141 hw_fifo->rtimer = 0;
2142 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2143 }
2144 fifo->interrupt_count = 0;
2145 }
2146}
2147
2148/**
2149 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2150 * if the interrupts are not within a range
2151 * @ring: pointer to receive ring structure
 2152 * Description: The function raises or clears the ring's restriction timer
 2153 * depending on traffic utilization, when the interrupts due to this ring are
 2154 * not within a fixed range.
2155 * Return Value: Nothing
2156 */
2157static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2158{
2159 ring->interrupt_count++;
2160 if (jiffies > ring->jiffies + HZ / 100) {
2161 struct __vxge_hw_ring *hw_ring = ring->handle;
2162
2163 ring->jiffies = jiffies;
2164 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2165 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2166 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2167 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2168 } else if (hw_ring->rtimer != 0) {
2169 hw_ring->rtimer = 0;
2170 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2171 }
2172 ring->interrupt_count = 0;
2173 }
2174}
2175
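Both adaptive_coalesce_*_interrupts() helpers above implement the same control loop: count interrupts and, roughly every 10 ms, either raise the restriction timer (when the count exceeds the Titan-1A threshold) or drop it back to zero; the driver additionally skips the register write when the value is already correct. A standalone user-space sketch of that loop, with threshold and timer values mirroring the defines in vxge-main.h:

#include <stdio.h>

#define MAX_INTERRUPT_COUNT     100     /* per 10 ms window, as in the driver */
#define RTIMER_ADAPT_VAL        15

struct ring_sim {
        unsigned long interrupt_count;
        unsigned int rtimer;
};

/* Called once per simulated interrupt; 'window_done' marks the 10 ms tick. */
static void adaptive_coalesce(struct ring_sim *ring, int window_done)
{
        ring->interrupt_count++;
        if (!window_done)
                return;

        if (ring->interrupt_count > MAX_INTERRUPT_COUNT)
                ring->rtimer = RTIMER_ADAPT_VAL;        /* slow interrupts down */
        else
                ring->rtimer = 0;                       /* back to default */
        ring->interrupt_count = 0;
}

int main(void)
{
        struct ring_sim ring = { 0, 0 };
        int i;

        for (i = 0; i < 150; i++)
                adaptive_coalesce(&ring, i == 149);
        printf("rtimer after busy window: %u\n", ring.rtimer);  /* 15 */

        for (i = 0; i < 20; i++)
                adaptive_coalesce(&ring, i == 19);
        printf("rtimer after quiet window: %u\n", ring.rtimer); /* 0 */
        return 0;
}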
2082/* 2176/*
2083 * vxge_isr_napi 2177 * vxge_isr_napi
2084 * @irq: the irq of the device. 2178 * @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2139 2233
2140#ifdef CONFIG_PCI_MSI 2234#ifdef CONFIG_PCI_MSI
2141 2235
2142static irqreturn_t 2236static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2143vxge_tx_msix_handle(int irq, void *dev_id)
2144{ 2237{
2145 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; 2238 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2146 2239
2240 adaptive_coalesce_tx_interrupts(fifo);
2241
2242 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2243 fifo->tx_vector_no);
2244
2245 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2246 fifo->tx_vector_no);
2247
2147 VXGE_COMPLETE_VPATH_TX(fifo); 2248 VXGE_COMPLETE_VPATH_TX(fifo);
2148 2249
2250 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2251 fifo->tx_vector_no);
2252
2253 mmiowb();
2254
2149 return IRQ_HANDLED; 2255 return IRQ_HANDLED;
2150} 2256}
2151 2257
2152static irqreturn_t 2258static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2153vxge_rx_msix_napi_handle(int irq, void *dev_id)
2154{ 2259{
2155 struct vxge_ring *ring = (struct vxge_ring *)dev_id; 2260 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2156 2261
2157 /* MSIX_IDX for Rx is 1 */ 2262 adaptive_coalesce_rx_interrupts(ring);
2263
2158 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, 2264 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2159 ring->rx_vector_no); 2265 ring->rx_vector_no);
2266
2267 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2268 ring->rx_vector_no);
2160 2269
2161 napi_schedule(&ring->napi); 2270 napi_schedule(&ring->napi);
2162 return IRQ_HANDLED; 2271 return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2173 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; 2282 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2174 2283
2175 for (i = 0; i < vdev->no_of_vpath; i++) { 2284 for (i = 0; i < vdev->no_of_vpath; i++) {
 2285 /* Reduce the chance of losing alarm interrupts by masking
 2286 * the vector. A pending bit will be set if an alarm is
 2287 * generated, and the interrupt will fire again on unmask.
2288 */
2176 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); 2289 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2290 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2291 mmiowb();
2177 2292
2178 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2293 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2179 vdev->exec_mode); 2294 vdev->exec_mode);
2180 if (status == VXGE_HW_OK) { 2295 if (status == VXGE_HW_OK) {
2181
2182 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2296 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2183 msix_id); 2297 msix_id);
2298 mmiowb();
2184 continue; 2299 continue;
2185 } 2300 }
2186 vxge_debug_intr(VXGE_ERR, 2301 vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
2299 vpath->ring.rx_vector_no = (vpath->device_id * 2414 vpath->ring.rx_vector_no = (vpath->device_id *
2300 VXGE_HW_VPATH_MSIX_ACTIVE) + 1; 2415 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2301 2416
2417 vpath->fifo.tx_vector_no = (vpath->device_id *
2418 VXGE_HW_VPATH_MSIX_ACTIVE);
2419
2302 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 2420 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2303 VXGE_ALARM_MSIX_ID); 2421 VXGE_ALARM_MSIX_ID);
2304 } 2422 }
@@ -2474,8 +2592,9 @@ INTA_MODE:
2474 "%s:vxge:INTA", vdev->ndev->name); 2592 "%s:vxge:INTA", vdev->ndev->name);
2475 vxge_hw_device_set_intr_type(vdev->devh, 2593 vxge_hw_device_set_intr_type(vdev->devh,
2476 VXGE_HW_INTR_MODE_IRQLINE); 2594 VXGE_HW_INTR_MODE_IRQLINE);
2477 vxge_hw_vpath_tti_ci_set(vdev->devh, 2595
2478 vdev->vpaths[0].device_id); 2596 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2597
2479 ret = request_irq((int) vdev->pdev->irq, 2598 ret = request_irq((int) vdev->pdev->irq,
2480 vxge_isr_napi, 2599 vxge_isr_napi,
2481 IRQF_SHARED, vdev->desc[0], vdev); 2600 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
2745 } 2864 }
2746 2865
2747 netif_tx_start_all_queues(vdev->ndev); 2866 netif_tx_start_all_queues(vdev->ndev);
2867
2868 /* configure CI */
2869 vxge_config_ci_for_tti_rti(vdev);
2870
2748 goto out0; 2871 goto out0;
2749 2872
2750out2: 2873out2:
@@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = {
3264#endif 3387#endif
3265}; 3388};
3266 3389
3267static int __devinit vxge_device_revision(struct vxgedev *vdev)
3268{
3269 int ret;
3270 u8 revision;
3271
3272 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3273 if (ret)
3274 return -EIO;
3275
3276 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3277 return 0;
3278}
3279
3280static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3390static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3281 struct vxge_config *config, 3391 struct vxge_config *config,
3282 int high_dma, int no_of_vpath, 3392 int high_dma, int no_of_vpath,
@@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3316 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3426 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3317 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3427 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3318 vdev->rx_hwts = 0; 3428 vdev->rx_hwts = 0;
3319 3429 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3320 ret = vxge_device_revision(vdev);
3321 if (ret < 0)
3322 goto _out1;
3323 3430
3324 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3431 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3325 3432
@@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3348 vxge_debug_init(VXGE_ERR, 3455 vxge_debug_init(VXGE_ERR,
3349 "%s: vpath memory allocation failed", 3456 "%s: vpath memory allocation failed",
3350 vdev->ndev->name); 3457 vdev->ndev->name);
3351 ret = -ENODEV; 3458 ret = -ENOMEM;
3352 goto _out1; 3459 goto _out1;
3353 } 3460 }
3354 3461
@@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3369 if (vdev->config.gro_enable) 3476 if (vdev->config.gro_enable)
3370 ndev->features |= NETIF_F_GRO; 3477 ndev->features |= NETIF_F_GRO;
3371 3478
3372 if (register_netdev(ndev)) { 3479 ret = register_netdev(ndev);
3480 if (ret) {
3373 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3481 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3374 "%s: %s : device registration failed!", 3482 "%s: %s : device registration failed!",
3375 ndev->name, __func__); 3483 ndev->name, __func__);
3376 ret = -ENODEV;
3377 goto _out2; 3484 goto _out2;
3378 } 3485 }
3379 3486
@@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3444 /* in 2.6 will call stop() if device is up */ 3551 /* in 2.6 will call stop() if device is up */
3445 unregister_netdev(dev); 3552 unregister_netdev(dev);
3446 3553
3554 kfree(vdev->vpaths);
3555
3556 /* we are safe to free it now */
3557 free_netdev(dev);
3558
3447 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", 3559 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3448 buf); 3560 buf);
3449 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, 3561 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init(
3799 break; 3911 break;
3800 3912
3801 case MSI_X: 3913 case MSI_X:
3802 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3914 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3803 break; 3915 break;
3804 } 3916 }
3805 3917
@@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4335 goto _exit1; 4447 goto _exit1;
4336 } 4448 }
4337 4449
4338 if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) { 4450 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4451 if (ret) {
4339 vxge_debug_init(VXGE_ERR, 4452 vxge_debug_init(VXGE_ERR,
4340 "%s : request regions failed", __func__); 4453 "%s : request regions failed", __func__);
4341 ret = -ENODEV;
4342 goto _exit1; 4454 goto _exit1;
4343 } 4455 }
4344 4456
@@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4446 if (!img[i].is_valid) 4558 if (!img[i].is_valid)
4447 break; 4559 break;
4448 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " 4560 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4449 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i, 4561 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4450 VXGE_EPROM_IMG_MAJOR(img[i].version), 4562 VXGE_EPROM_IMG_MAJOR(img[i].version),
4451 VXGE_EPROM_IMG_MINOR(img[i].version), 4563 VXGE_EPROM_IMG_MINOR(img[i].version),
4452 VXGE_EPROM_IMG_FIX(img[i].version), 4564 VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4755,9 @@ _exit6:
4643_exit5: 4755_exit5:
4644 vxge_device_unregister(hldev); 4756 vxge_device_unregister(hldev);
4645_exit4: 4757_exit4:
4646 pci_disable_sriov(pdev); 4758 pci_set_drvdata(pdev, NULL);
4647 vxge_hw_device_terminate(hldev); 4759 vxge_hw_device_terminate(hldev);
4760 pci_disable_sriov(pdev);
4648_exit3: 4761_exit3:
4649 iounmap(attr.bar0); 4762 iounmap(attr.bar0);
4650_exit2: 4763_exit2:
@@ -4655,7 +4768,7 @@ _exit0:
4655 kfree(ll_config); 4768 kfree(ll_config);
4656 kfree(device_config); 4769 kfree(device_config);
4657 driver_config->config_dev_cnt--; 4770 driver_config->config_dev_cnt--;
4658 pci_set_drvdata(pdev, NULL); 4771 driver_config->total_dev_cnt--;
4659 return ret; 4772 return ret;
4660} 4773}
4661 4774
@@ -4668,45 +4781,34 @@ _exit0:
4668static void __devexit vxge_remove(struct pci_dev *pdev) 4781static void __devexit vxge_remove(struct pci_dev *pdev)
4669{ 4782{
4670 struct __vxge_hw_device *hldev; 4783 struct __vxge_hw_device *hldev;
4671 struct vxgedev *vdev = NULL; 4784 struct vxgedev *vdev;
4672 struct net_device *dev; 4785 int i;
4673 int i = 0;
4674 4786
4675 hldev = pci_get_drvdata(pdev); 4787 hldev = pci_get_drvdata(pdev);
4676
4677 if (hldev == NULL) 4788 if (hldev == NULL)
4678 return; 4789 return;
4679 4790
4680 dev = hldev->ndev; 4791 vdev = netdev_priv(hldev->ndev);
4681 vdev = netdev_priv(dev);
4682 4792
4683 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); 4793 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4684
4685 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", 4794 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4686 __func__); 4795 __func__);
4687 vxge_device_unregister(hldev);
4688 4796
4689 for (i = 0; i < vdev->no_of_vpath; i++) { 4797 for (i = 0; i < vdev->no_of_vpath; i++)
4690 vxge_free_mac_add_list(&vdev->vpaths[i]); 4798 vxge_free_mac_add_list(&vdev->vpaths[i]);
4691 vdev->vpaths[i].mcast_addr_cnt = 0;
4692 vdev->vpaths[i].mac_addr_cnt = 0;
4693 }
4694
4695 kfree(vdev->vpaths);
4696 4799
4800 vxge_device_unregister(hldev);
4801 pci_set_drvdata(pdev, NULL);
4802 /* Do not call pci_disable_sriov here, as it will break child devices */
4803 vxge_hw_device_terminate(hldev);
4697 iounmap(vdev->bar0); 4804 iounmap(vdev->bar0);
4698 4805 pci_release_region(pdev, 0);
4699 /* we are safe to free it now */ 4806 pci_disable_device(pdev);
4700 free_netdev(dev); 4807 driver_config->config_dev_cnt--;
4808 driver_config->total_dev_cnt--;
4701 4809
4702 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", 4810 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4703 __func__, __LINE__); 4811 __func__, __LINE__);
4704
4705 vxge_hw_device_terminate(hldev);
4706
4707 pci_disable_device(pdev);
4708 pci_release_region(pdev, 0);
4709 pci_set_drvdata(pdev, NULL);
4710 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, 4812 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4711 __LINE__); 4813 __LINE__);
4712} 4814}
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fedc356..40474f0da57 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
59#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
60#define VXGE_T1A_TTI_LTIMER_VAL 80 60#define VXGE_T1A_TTI_LTIMER_VAL 80
61#define VXGE_TTI_RTIMER_VAL 0 61#define VXGE_TTI_RTIMER_VAL 0
62#define VXGE_TTI_RTIMER_ADAPT_VAL 10
62#define VXGE_T1A_TTI_RTIMER_VAL 400 63#define VXGE_T1A_TTI_RTIMER_VAL 400
63#define VXGE_RTI_BTIMER_VAL 250 64#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100 65#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0 66#define VXGE_RTI_RTIMER_VAL 0
66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 67#define VXGE_RTI_RTIMER_ADAPT_VAL 15
68#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
67#define VXGE_ISR_POLLING_CNT 8 69#define VXGE_ISR_POLLING_CNT 8
68#define VXGE_MAX_CONFIG_DEV 0xFF 70#define VXGE_MAX_CONFIG_DEV 0xFF
69#define VXGE_EXEC_MODE_DISABLE 0 71#define VXGE_EXEC_MODE_DISABLE 0
@@ -107,6 +109,14 @@
107#define RTI_T1A_RX_UFC_C 50 109#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60 110#define RTI_T1A_RX_UFC_D 60
109 111
112/*
113 * The interrupt rate is maintained at 3k per second with the moderation
114 * parameters for most traffic but not all. This is the maximum interrupt
115 * count allowed per function with INTA or per vector in the case of
116 * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A.
117 */
118#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
119#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
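Expressed as a rate, the per-10 ms counts above mark the point at which the adaptive logic starts raising the restriction timer; a quick standalone calculation (values copied from the defines above) is:

#include <stdio.h>

int main(void)
{
        const int window_ms = 10;       /* measurement window from the comment above */
        const int rx_max = 100;         /* VXGE_T1A_MAX_INTERRUPT_COUNT */
        const int tx_max = 200;         /* VXGE_T1A_MAX_TX_INTERRUPT_COUNT */

        printf("RX adapt threshold: %d interrupts/s\n", rx_max * 1000 / window_ms);
        printf("TX adapt threshold: %d interrupts/s\n", tx_max * 1000 / window_ms);
        return 0;
}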
110 120
111/* Milli secs timer period */ 121/* Milli secs timer period */
112#define VXGE_TIMER_DELAY 10000 122#define VXGE_TIMER_DELAY 10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
247 int tx_steering_type; 257 int tx_steering_type;
248 int indicate_max_pkts; 258 int indicate_max_pkts;
249 259
260 /* Adaptive interrupt moderation parameters used in T1A */
261 unsigned long interrupt_count;
262 unsigned long jiffies;
263
264 u32 tx_vector_no;
250 /* Tx stats */ 265 /* Tx stats */
251 struct vxge_fifo_stats stats; 266 struct vxge_fifo_stats stats;
252} ____cacheline_aligned; 267} ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
271 */ 286 */
272 int driver_id; 287 int driver_id;
273 288
289 /* Adaptive interrupt moderation parameters used in T1A */
290 unsigned long interrupt_count;
291 unsigned long jiffies;
292
274 /* copy of the flag indicating whether rx_csum is to be used */ 293 /* copy of the flag indicating whether rx_csum is to be used */
275 u32 rx_csum:1, 294 u32 rx_csum:1,
276 rx_hwts:1; 295 rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
286 305
287 int vlan_tag_strip; 306 int vlan_tag_strip;
288 struct vlan_group *vlgrp; 307 struct vlan_group *vlgrp;
289 int rx_vector_no; 308 u32 rx_vector_no;
290 enum vxge_hw_status last_status; 309 enum vxge_hw_status last_status;
291 310
292 /* Rx stats */ 311 /* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c4075..8674f331311 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
218 return status; 218 return status;
219} 219}
220 220
221void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
222{
223 struct vxge_hw_vpath_reg __iomem *vp_reg;
224 struct vxge_hw_vp_config *config;
225 u64 val64;
226
227 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
228 return;
229
230 vp_reg = fifo->vp_reg;
231 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
232
233 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
234 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
235 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
236 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
237 fifo->tim_tti_cfg1_saved = val64;
238 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
239 }
240}
241
242void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
243{
244 u64 val64 = ring->tim_rti_cfg1_saved;
245
246 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
247 ring->tim_rti_cfg1_saved = val64;
248 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
249}
250
251void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
252{
253 u64 val64 = fifo->tim_tti_cfg3_saved;
254 u64 timer = (fifo->rtimer * 1000) / 272;
255
256 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
257 if (timer)
258 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
259 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
260
261 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
262 /* tti_cfg3_saved is not updated again because it is
263 * initialized at one place only - init time.
264 */
265}
266
267void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
268{
269 u64 val64 = ring->tim_rti_cfg3_saved;
270 u64 timer = (ring->rtimer * 1000) / 272;
271
272 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
273 if (timer)
274 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
275 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
276
277 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
278 /* rti_cfg3_saved is not updated again because it is
279 * initialized at one place only - init time.
280 */
281}
282
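The dynamic rtimer setters above scale the driver-level rtimer value into TIM register units with (rtimer * 1000) / 272 before masking it into the RTIMER_VAL field. A standalone check of what the adaptive values defined in vxge-main.h (10 for TTI, 15 for RTI) translate to:

#include <stdio.h>

int main(void)
{
        /* VXGE_TTI_RTIMER_ADAPT_VAL and VXGE_RTI_RTIMER_ADAPT_VAL from vxge-main.h */
        const unsigned long tti_adapt = 10, rti_adapt = 15;

        printf("TTI RTIMER_VAL field: %lu\n", tti_adapt * 1000 / 272);  /* 36 */
        printf("RTI RTIMER_VAL field: %lu\n", rti_adapt * 1000 / 272);  /* 55 */
        return 0;
}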
221/** 283/**
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector. 284 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channeh: Channel for rx or tx handle 285 * @channeh: Channel for rx or tx handle
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
254} 316}
255 317
256/** 318/**
 319 * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
 320 * @channel: Channel for rx or tx handle
 321 * @msix_id: MSI ID
 322 *
 323 * The function clears the msix interrupt for the given msix_id
 324 * when the device is configured in MSI-X one-shot mode
 325 *
 326 * Returns: None
327 */
328void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
329{
330 __vxge_hw_pio_mem_write32_upper(
331 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
332 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
333}
334
335/**
257 * vxge_hw_device_set_intr_type - Updates the configuration 336 * vxge_hw_device_set_intr_type - Updates the configuration
258 * with new interrupt type. 337 * with new interrupt type.
259 * @hldev: HW device handle. 338 * @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2191 if (vpath->hldev->config.intr_mode == 2270 if (vpath->hldev->config.intr_mode ==
2192 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { 2271 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2193 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2272 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2273 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2274 0, 32), &vp_reg->one_shot_vect0_en);
2275 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2194 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, 2276 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2195 0, 32), &vp_reg->one_shot_vect1_en); 2277 0, 32), &vp_reg->one_shot_vect1_en);
2196 }
2197
2198 if (vpath->hldev->config.intr_mode ==
2199 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2200 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( 2278 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2201 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, 2279 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2202 0, 32), &vp_reg->one_shot_vect2_en); 2280 0, 32), &vp_reg->one_shot_vect2_en);
2203
2204 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2205 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2206 0, 32), &vp_reg->one_shot_vect3_en);
2207 } 2281 }
2208} 2282}
2209 2283
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2229} 2303}
2230 2304
2231/** 2305/**
2306 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2307 * @vp: Virtual Path handle.
2308 * @msix_id: MSI ID
2309 *
2310 * The function clears the msix interrupt for the given msix_id
2311 *
 2312 * Returns: None
2315 * See also:
2316 */
2317void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2318{
2319 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2320
2321 if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
2322 __vxge_hw_pio_mem_write32_upper(
2323 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2324 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2325 else
2326 __vxge_hw_pio_mem_write32_upper(
2327 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2328 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2329}
2330
2331/**
2232 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. 2332 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2233 * @vp: Virtual Path handle. 2333 * @vp: Virtual Path handle.
2234 * @msix_id: MSI ID 2334 * @msix_id: MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d6afa..9d9dfda4c7a 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
2142 * Virtual Paths 2142 * Virtual Paths
2143 */ 2143 */
2144 2144
2145void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
2146
2147void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
2148
2145u32 vxge_hw_vpath_id( 2149u32 vxge_hw_vpath_id(
2146 struct __vxge_hw_vpath_handle *vpath_handle); 2150 struct __vxge_hw_vpath_handle *vpath_handle);
2147 2151
@@ -2245,6 +2249,8 @@ void
2245vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, 2249vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
2246 int msix_id); 2250 int msix_id);
2247 2251
2252void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
2253
2248void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); 2254void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
2249 2255
2250void 2256void
@@ -2270,6 +2276,9 @@ void
2270vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); 2276vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
2271 2277
2272void 2278void
2279vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
2280
2281void
2273vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, 2282vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
2274 void **dtrh); 2283 void **dtrh);
2275 2284
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2282int 2291int
2283vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); 2292vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2284 2293
2285void 2294void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
2286vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); 2295
2296void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
2287 2297
2288#endif 2298#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b9bcf..581e21525e8 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "5" 18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "1" 19#define VXGE_VERSION_FIX "2"
20#define VXGE_VERSION_BUILD "22082" 20#define VXGE_VERSION_BUILD "22259"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22 22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) 23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index d7a4799d20f..7b9672b0d09 100644
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -1,8 +1,10 @@
1config AR9170_USB 1config AR9170_USB
2 tristate "Atheros AR9170 802.11n USB support" 2 tristate "Atheros AR9170 802.11n USB support (OBSOLETE)"
3 depends on USB && MAC80211 3 depends on USB && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 help 5 help
6 This driver is going to get replaced by carl9170.
7
6 This is a driver for the Atheros "otus" 802.11n USB devices. 8 This is a driver for the Atheros "otus" 802.11n USB devices.
7 9
8 These devices require additional firmware (2 files). 10 These devices require additional firmware (2 files).
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 32bf79e6a32..a9111e1161f 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1945,7 +1945,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1945static int ar9170_ampdu_action(struct ieee80211_hw *hw, 1945static int ar9170_ampdu_action(struct ieee80211_hw *hw,
1946 struct ieee80211_vif *vif, 1946 struct ieee80211_vif *vif,
1947 enum ieee80211_ampdu_mlme_action action, 1947 enum ieee80211_ampdu_mlme_action action,
1948 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 1948 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
1949 u8 buf_size)
1949{ 1950{
1950 switch (action) { 1951 switch (action) {
1951 case IEEE80211_AMPDU_RX_START: 1952 case IEEE80211_AMPDU_RX_START:
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e43210c8585..a6c6a466000 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -108,12 +108,14 @@ enum ath_cipher {
108 * struct ath_ops - Register read/write operations 108 * struct ath_ops - Register read/write operations
109 * 109 *
110 * @read: Register read 110 * @read: Register read
111 * @multi_read: Multiple register read
111 * @write: Register write 112 * @write: Register write
112 * @enable_write_buffer: Enable multiple register writes 113 * @enable_write_buffer: Enable multiple register writes
113 * @write_flush: flush buffered register writes and disable buffering 114 * @write_flush: flush buffered register writes and disable buffering
114 */ 115 */
115struct ath_ops { 116struct ath_ops {
116 unsigned int (*read)(void *, u32 reg_offset); 117 unsigned int (*read)(void *, u32 reg_offset);
118 void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
117 void (*write)(void *, u32 val, u32 reg_offset); 119 void (*write)(void *, u32 val, u32 reg_offset);
118 void (*enable_write_buffer)(void *); 120 void (*enable_write_buffer)(void *);
119 void (*write_flush) (void *); 121 void (*write_flush) (void *);
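The new multi_read hook lets a bus backend batch several register reads into one operation. A usage sketch, assuming the usual ath_common field names (ops, priv); the fallback loop is an assumption about how callers would cope with backends that do not provide the hook.

/* Sketch only: prefer the batched hook when available, otherwise fall
 * back to repeated single-register reads. */
static void demo_read_block(struct ath_common *common,
			    u32 *addr, u32 *val, u16 count)
{
	u16 i;

	if (common->ops->multi_read)
		common->ops->multi_read(common->priv, addr, val, count);
	else
		for (i = 0; i < count; i++)
			val[i] = common->ops->read(common->priv, addr[i]);
}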
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index e0793319389..e18a9aa7b6c 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -40,6 +40,17 @@ config ATH5K_DEBUG
40 40
41 modprobe ath5k debug=0x00000400 41 modprobe ath5k debug=0x00000400
42 42
43config ATH5K_TRACER
44 bool "Atheros 5xxx tracer"
45 depends on ATH5K
46 depends on EVENT_TRACING
47 ---help---
48 Say Y here to enable tracepoints for the ath5k driver
49 using the kernel tracing infrastructure. Select this
50 option if you are interested in debugging the driver.
51
52 If unsure, say N.
53
43config ATH5K_AHB 54config ATH5K_AHB
44 bool "Atheros 5xxx AHB bus support" 55 bool "Atheros 5xxx AHB bus support"
45 depends on (ATHEROS_AR231X && !PCI) 56 depends on (ATHEROS_AR231X && !PCI)
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 707cde14924..ae84b86c3bf 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -31,7 +31,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
31 *csz = L1_CACHE_BYTES >> 2; 31 *csz = L1_CACHE_BYTES >> 2;
32} 32}
33 33
34bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) 34static bool
35ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
35{ 36{
36 struct ath5k_softc *sc = common->priv; 37 struct ath5k_softc *sc = common->priv;
37 struct platform_device *pdev = to_platform_device(sc->dev); 38 struct platform_device *pdev = to_platform_device(sc->dev);
@@ -46,10 +47,10 @@ bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
46 47
47 eeprom += off; 48 eeprom += off;
48 if (eeprom > eeprom_end) 49 if (eeprom > eeprom_end)
49 return -EINVAL; 50 return false;
50 51
51 *data = *eeprom; 52 *data = *eeprom;
52 return 0; 53 return true;
53} 54}
54 55
55int ath5k_hw_read_srev(struct ath5k_hw *ah) 56int ath5k_hw_read_srev(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 407e39c2b10..70abb61e9ef 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -210,14 +210,9 @@
210/* Initial values */ 210/* Initial values */
211#define AR5K_INIT_CYCRSSI_THR1 2 211#define AR5K_INIT_CYCRSSI_THR1 2
212 212
213/* Tx retry limits */ 213/* Tx retry limit defaults from standard */
214#define AR5K_INIT_SH_RETRY 10 214#define AR5K_INIT_RETRY_SHORT 7
215#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY 215#define AR5K_INIT_RETRY_LONG 4
216/* For station mode */
217#define AR5K_INIT_SSH_RETRY 32
218#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
219#define AR5K_INIT_TX_RETRY 10
220
221 216
222/* Slot time */ 217/* Slot time */
223#define AR5K_INIT_SLOT_TIME_TURBO 6 218#define AR5K_INIT_SLOT_TIME_TURBO 6
@@ -1057,7 +1052,9 @@ struct ath5k_hw {
1057#define ah_modes ah_capabilities.cap_mode 1052#define ah_modes ah_capabilities.cap_mode
1058#define ah_ee_version ah_capabilities.cap_eeprom.ee_version 1053#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
1059 1054
1060 u32 ah_limit_tx_retries; 1055 u8 ah_retry_long;
1056 u8 ah_retry_short;
1057
1061 u8 ah_coverage_class; 1058 u8 ah_coverage_class;
1062 bool ah_ack_bitrate_high; 1059 bool ah_ack_bitrate_high;
1063 u8 ah_bwmode; 1060 u8 ah_bwmode;
@@ -1067,7 +1064,6 @@ struct ath5k_hw {
1067 u8 ah_ant_mode; 1064 u8 ah_ant_mode;
1068 u8 ah_tx_ant; 1065 u8 ah_tx_ant;
1069 u8 ah_def_ant; 1066 u8 ah_def_ant;
1070 bool ah_software_retry;
1071 1067
1072 struct ath5k_capabilities ah_capabilities; 1068 struct ath5k_capabilities ah_capabilities;
1073 1069
@@ -1162,6 +1158,26 @@ void ath5k_hw_deinit(struct ath5k_hw *ah);
1162int ath5k_sysfs_register(struct ath5k_softc *sc); 1158int ath5k_sysfs_register(struct ath5k_softc *sc);
1163void ath5k_sysfs_unregister(struct ath5k_softc *sc); 1159void ath5k_sysfs_unregister(struct ath5k_softc *sc);
1164 1160
1161/* base.c */
1162struct ath5k_buf;
1163struct ath5k_txq;
1164
1165void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
1166bool ath_any_vif_assoc(struct ath5k_softc *sc);
1167int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1168 struct ath5k_txq *txq);
1169int ath5k_init_hw(struct ath5k_softc *sc);
1170int ath5k_stop_hw(struct ath5k_softc *sc);
1171void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
1172void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
1173 struct ieee80211_vif *vif);
1174int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
1175void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
1176int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1177void ath5k_beacon_config(struct ath5k_softc *sc);
1178void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
1179void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
1180
1165/*Chip id helper functions */ 1181/*Chip id helper functions */
1166const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val); 1182const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
1167int ath5k_hw_read_srev(struct ath5k_hw *ah); 1183int ath5k_hw_read_srev(struct ath5k_hw *ah);
@@ -1250,6 +1266,8 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
1250int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, 1266int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
1251 enum ath5k_tx_queue queue_type, 1267 enum ath5k_tx_queue queue_type,
1252 struct ath5k_txq_info *queue_info); 1268 struct ath5k_txq_info *queue_info);
1269void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
1270 unsigned int queue);
1253u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue); 1271u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1254void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1272void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1255int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1273int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
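The newly exported ath5k_hw_set_tx_retry_limits() prototype is driven per TX queue; the mac80211-ops.c hunk later in this diff does exactly that when mac80211 signals IEEE80211_CONF_CHANGE_RETRY_LIMITS. Condensed into one helper for reference (the wrapper function itself is illustrative):

/* Condensed from the mac80211-ops.c hunk below: store the limits from
 * mac80211's config and push them into every hardware TX queue. */
static void demo_apply_retry_limits(struct ath5k_hw *ah,
				    struct ieee80211_conf *conf)
{
	int i;

	ah->ah_retry_long = conf->long_frame_max_tx_count;
	ah->ah_retry_short = conf->short_frame_max_tx_count;

	for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
		ath5k_hw_set_tx_retry_limits(ah, i);
}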
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index cdac5cff017..bc824056048 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -118,8 +118,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
118 ah->ah_bwmode = AR5K_BWMODE_DEFAULT; 118 ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
120 ah->ah_imr = 0; 120 ah->ah_imr = 0;
121 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; 121 ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
122 ah->ah_software_retry = false; 122 ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
123 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT; 123 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
124 ah->ah_noise_floor = -95; /* until first NF calibration is run */ 124 ah->ah_noise_floor = -95; /* until first NF calibration is run */
125 sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO; 125 sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
@@ -220,7 +220,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
220 ah->ah_radio = AR5K_RF5112; 220 ah->ah_radio = AR5K_RF5112;
221 ah->ah_single_chip = false; 221 ah->ah_single_chip = false;
222 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B; 222 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
223 } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) { 223 } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
224 ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
224 ah->ah_radio = AR5K_RF2316; 225 ah->ah_radio = AR5K_RF2316;
225 ah->ah_single_chip = true; 226 ah->ah_single_chip = true;
226 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316; 227 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 09ae4ef0fd5..dbc45e08543 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -61,6 +61,9 @@
61#include "debug.h" 61#include "debug.h"
62#include "ani.h" 62#include "ani.h"
63 63
64#define CREATE_TRACE_POINTS
65#include "trace.h"
66
64int ath5k_modparam_nohwcrypt; 67int ath5k_modparam_nohwcrypt;
65module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); 68module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 69MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -242,73 +245,68 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
242\********************/ 245\********************/
243 246
244/* 247/*
245 * Convert IEEE channel number to MHz frequency.
246 */
247static inline short
248ath5k_ieee2mhz(short chan)
249{
250 if (chan <= 14 || chan >= 27)
251 return ieee80211chan2mhz(chan);
252 else
253 return 2212 + chan * 20;
254}
255
256/*
257 * Returns true for the channel numbers used without all_channels modparam. 248 * Returns true for the channel numbers used without all_channels modparam.
258 */ 249 */
259static bool ath5k_is_standard_channel(short chan) 250static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
260{ 251{
261 return ((chan <= 14) || 252 if (band == IEEE80211_BAND_2GHZ && chan <= 14)
262 /* UNII 1,2 */ 253 return true;
263 ((chan & 3) == 0 && chan >= 36 && chan <= 64) || 254
255 return /* UNII 1,2 */
256 (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
264 /* midband */ 257 /* midband */
265 ((chan & 3) == 0 && chan >= 100 && chan <= 140) || 258 ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
266 /* UNII-3 */ 259 /* UNII-3 */
267 ((chan & 3) == 1 && chan >= 149 && chan <= 165)); 260 ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
261 /* 802.11j 5.030-5.080 GHz (20MHz) */
262 (chan == 8 || chan == 12 || chan == 16) ||
263 /* 802.11j 4.9GHz (20MHz) */
264 (chan == 184 || chan == 188 || chan == 192 || chan == 196));
268} 265}
269 266
270static unsigned int 267static unsigned int
271ath5k_copy_channels(struct ath5k_hw *ah, 268ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
272 struct ieee80211_channel *channels, 269 unsigned int mode, unsigned int max)
273 unsigned int mode,
274 unsigned int max)
275{ 270{
276 unsigned int i, count, size, chfreq, freq, ch; 271 unsigned int count, size, chfreq, freq, ch;
277 272 enum ieee80211_band band;
278 if (!test_bit(mode, ah->ah_modes))
279 return 0;
280 273
281 switch (mode) { 274 switch (mode) {
282 case AR5K_MODE_11A: 275 case AR5K_MODE_11A:
283 /* 1..220, but 2GHz frequencies are filtered by check_channel */ 276 /* 1..220, but 2GHz frequencies are filtered by check_channel */
284 size = 220 ; 277 size = 220;
285 chfreq = CHANNEL_5GHZ; 278 chfreq = CHANNEL_5GHZ;
279 band = IEEE80211_BAND_5GHZ;
286 break; 280 break;
287 case AR5K_MODE_11B: 281 case AR5K_MODE_11B:
288 case AR5K_MODE_11G: 282 case AR5K_MODE_11G:
289 size = 26; 283 size = 26;
290 chfreq = CHANNEL_2GHZ; 284 chfreq = CHANNEL_2GHZ;
285 band = IEEE80211_BAND_2GHZ;
291 break; 286 break;
292 default: 287 default:
293 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); 288 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
294 return 0; 289 return 0;
295 } 290 }
296 291
297 for (i = 0, count = 0; i < size && max > 0; i++) { 292 count = 0;
298 ch = i + 1 ; 293 for (ch = 1; ch <= size && count < max; ch++) {
299 freq = ath5k_ieee2mhz(ch); 294 freq = ieee80211_channel_to_frequency(ch, band);
295
296 if (freq == 0) /* mapping failed - not a standard channel */
297 continue;
300 298
301 /* Check if channel is supported by the chipset */ 299 /* Check if channel is supported by the chipset */
302 if (!ath5k_channel_ok(ah, freq, chfreq)) 300 if (!ath5k_channel_ok(ah, freq, chfreq))
303 continue; 301 continue;
304 302
305 if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) 303 if (!modparam_all_channels &&
304 !ath5k_is_standard_channel(ch, band))
306 continue; 305 continue;
307 306
308 /* Write channel info and increment counter */ 307 /* Write channel info and increment counter */
309 channels[count].center_freq = freq; 308 channels[count].center_freq = freq;
310 channels[count].band = (chfreq == CHANNEL_2GHZ) ? 309 channels[count].band = band;
311 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
312 switch (mode) { 310 switch (mode) {
313 case AR5K_MODE_11A: 311 case AR5K_MODE_11A:
314 case AR5K_MODE_11G: 312 case AR5K_MODE_11G:
@@ -319,7 +317,6 @@ ath5k_copy_channels(struct ath5k_hw *ah,
319 } 317 }
320 318
321 count++; 319 count++;
322 max--;
323 } 320 }
324 321
325 return count; 322 return count;
@@ -364,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
364 sband->n_bitrates = 12; 361 sband->n_bitrates = 12;
365 362
366 sband->channels = sc->channels; 363 sband->channels = sc->channels;
367 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 364 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
368 AR5K_MODE_11G, max_c); 365 AR5K_MODE_11G, max_c);
369 366
370 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 367 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -390,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
390 } 387 }
391 388
392 sband->channels = sc->channels; 389 sband->channels = sc->channels;
393 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 390 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
394 AR5K_MODE_11B, max_c); 391 AR5K_MODE_11B, max_c);
395 392
396 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 393 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -410,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
410 sband->n_bitrates = 8; 407 sband->n_bitrates = 8;
411 408
412 sband->channels = &sc->channels[count_c]; 409 sband->channels = &sc->channels[count_c];
413 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 410 sband->n_channels = ath5k_setup_channels(ah, sband->channels,
414 AR5K_MODE_11A, max_c); 411 AR5K_MODE_11A, max_c);
415 412
416 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 413 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
@@ -445,18 +442,6 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
445 return ath5k_reset(sc, chan, true); 442 return ath5k_reset(sc, chan, true);
446} 443}
447 444
448static void
449ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
450{
451 sc->curmode = mode;
452
453 if (mode == AR5K_MODE_11A) {
454 sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
455 } else {
456 sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
457 }
458}
459
460struct ath_vif_iter_data { 445struct ath_vif_iter_data {
461 const u8 *hw_macaddr; 446 const u8 *hw_macaddr;
462 u8 mask[ETH_ALEN]; 447 u8 mask[ETH_ALEN];
@@ -569,7 +554,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
569 "hw_rix out of bounds: %x\n", hw_rix)) 554 "hw_rix out of bounds: %x\n", hw_rix))
570 return 0; 555 return 0;
571 556
572 rix = sc->rate_idx[sc->curband->band][hw_rix]; 557 rix = sc->rate_idx[sc->curchan->band][hw_rix];
573 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) 558 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
574 rix = 0; 559 rix = 0;
575 560
@@ -1379,7 +1364,7 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1379 rxs->flag |= RX_FLAG_TSFT; 1364 rxs->flag |= RX_FLAG_TSFT;
1380 1365
1381 rxs->freq = sc->curchan->center_freq; 1366 rxs->freq = sc->curchan->center_freq;
1382 rxs->band = sc->curband->band; 1367 rxs->band = sc->curchan->band;
1383 1368
1384 rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi; 1369 rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
1385 1370
@@ -1394,10 +1379,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1394 rxs->flag |= ath5k_rx_decrypted(sc, skb, rs); 1379 rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
1395 1380
1396 if (rxs->rate_idx >= 0 && rs->rs_rate == 1381 if (rxs->rate_idx >= 0 && rs->rs_rate ==
1397 sc->curband->bitrates[rxs->rate_idx].hw_value_short) 1382 sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
1398 rxs->flag |= RX_FLAG_SHORTPRE; 1383 rxs->flag |= RX_FLAG_SHORTPRE;
1399 1384
1400 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 1385 trace_ath5k_rx(sc, skb);
1401 1386
1402 ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi); 1387 ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
1403 1388
@@ -1542,7 +1527,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1542 unsigned long flags; 1527 unsigned long flags;
1543 int padsize; 1528 int padsize;
1544 1529
1545 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 1530 trace_ath5k_tx(sc, skb, txq);
1546 1531
1547 /* 1532 /*
1548 * The hardware expects the header padded to 4 byte boundaries. 1533 * The hardware expects the header padded to 4 byte boundaries.
@@ -1591,7 +1576,7 @@ drop_packet:
1591 1576
1592static void 1577static void
1593ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb, 1578ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1594 struct ath5k_tx_status *ts) 1579 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
1595{ 1580{
1596 struct ieee80211_tx_info *info; 1581 struct ieee80211_tx_info *info;
1597 int i; 1582 int i;
@@ -1643,6 +1628,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1643 else 1628 else
1644 sc->stats.antenna_tx[0]++; /* invalid */ 1629 sc->stats.antenna_tx[0]++; /* invalid */
1645 1630
1631 trace_ath5k_tx_complete(sc, skb, txq, ts);
1646 ieee80211_tx_status(sc->hw, skb); 1632 ieee80211_tx_status(sc->hw, skb);
1647} 1633}
1648 1634
@@ -1679,7 +1665,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1679 1665
1680 dma_unmap_single(sc->dev, bf->skbaddr, skb->len, 1666 dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
1681 DMA_TO_DEVICE); 1667 DMA_TO_DEVICE);
1682 ath5k_tx_frame_completed(sc, skb, &ts); 1668 ath5k_tx_frame_completed(sc, skb, txq, &ts);
1683 } 1669 }
1684 1670
1685 /* 1671 /*
@@ -1821,8 +1807,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1821 goto out; 1807 goto out;
1822 } 1808 }
1823 1809
1824 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
1825
1826 ath5k_txbuf_free_skb(sc, avf->bbuf); 1810 ath5k_txbuf_free_skb(sc, avf->bbuf);
1827 avf->bbuf->skb = skb; 1811 avf->bbuf->skb = skb;
1828 ret = ath5k_beacon_setup(sc, avf->bbuf); 1812 ret = ath5k_beacon_setup(sc, avf->bbuf);
@@ -1917,6 +1901,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1917 sc->opmode == NL80211_IFTYPE_MESH_POINT) 1901 sc->opmode == NL80211_IFTYPE_MESH_POINT)
1918 ath5k_beacon_update(sc->hw, vif); 1902 ath5k_beacon_update(sc->hw, vif);
1919 1903
1904 trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
1905
1920 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); 1906 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
1921 ath5k_hw_start_tx_dma(ah, sc->bhalq); 1907 ath5k_hw_start_tx_dma(ah, sc->bhalq);
1922 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", 1908 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -2417,7 +2403,8 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
2417 /* set up multi-rate retry capabilities */ 2403 /* set up multi-rate retry capabilities */
2418 if (sc->ah->ah_version == AR5K_AR5212) { 2404 if (sc->ah->ah_version == AR5K_AR5212) {
2419 hw->max_rates = 4; 2405 hw->max_rates = 4;
2420 hw->max_rate_tries = 11; 2406 hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
2407 AR5K_INIT_RETRY_LONG);
2421 } 2408 }
2422 2409
2423 hw->vif_data_size = sizeof(struct ath5k_vif); 2410 hw->vif_data_size = sizeof(struct ath5k_vif);
@@ -2554,7 +2541,6 @@ ath5k_init_hw(struct ath5k_softc *sc)
2554 * and then setup of the interrupt mask. 2541 * and then setup of the interrupt mask.
2555 */ 2542 */
2556 sc->curchan = sc->hw->conf.channel; 2543 sc->curchan = sc->hw->conf.channel;
2557 sc->curband = &sc->sbands[sc->curchan->band];
2558 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2544 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2559 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2545 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2560 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; 2546 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
@@ -2681,10 +2667,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
2681 * so we should also free any remaining 2667 * so we should also free any remaining
2682 * tx buffers */ 2668 * tx buffers */
2683 ath5k_drain_tx_buffs(sc); 2669 ath5k_drain_tx_buffs(sc);
2684 if (chan) { 2670 if (chan)
2685 sc->curchan = chan; 2671 sc->curchan = chan;
2686 sc->curband = &sc->sbands[chan->band];
2687 }
2688 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL, 2672 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
2689 skip_pcu); 2673 skip_pcu);
2690 if (ret) { 2674 if (ret) {
@@ -2782,12 +2766,6 @@ ath5k_init(struct ieee80211_hw *hw)
2782 goto err; 2766 goto err;
2783 } 2767 }
2784 2768
2785 /* NB: setup here so ath5k_rate_update is happy */
2786 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
2787 ath5k_setcurmode(sc, AR5K_MODE_11A);
2788 else
2789 ath5k_setcurmode(sc, AR5K_MODE_11B);
2790
2791 /* 2769 /*
2792 * Allocate tx+rx descriptors and populate the lists. 2770 * Allocate tx+rx descriptors and populate the lists.
2793 */ 2771 */
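ath5k_setup_channels() now leans on ieee80211_channel_to_frequency() and simply skips any channel the helper rejects (freq == 0) instead of using the removed driver-local ath5k_ieee2mhz(). A simplified sketch of the mapping the mac80211 helper performs, not its exact source:

/* Simplified sketch of ieee80211_channel_to_frequency(): standard
 * channel numbering for 2.4 GHz, 5 GHz and the 802.11j 4.9 GHz range. */
static int demo_chan_to_freq(int chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ) {
		if (chan == 14)
			return 2484;
		if (chan >= 1 && chan <= 13)
			return 2407 + chan * 5;	/* channel 1 = 2412 MHz */
		return 0;			/* invalid: caller skips it */
	}
	/* 5 GHz: channel 36 = 5180 MHz; 802.11j channels 182..196 map
	 * into the 4.9 GHz range. */
	if (chan >= 182 && chan <= 196)
		return 4000 + chan * 5;
	return 5000 + chan * 5;
}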
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 6d511476e4d..8f919dca95f 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -183,8 +183,6 @@ struct ath5k_softc {
183 enum nl80211_iftype opmode; 183 enum nl80211_iftype opmode;
184 struct ath5k_hw *ah; /* Atheros HW */ 184 struct ath5k_hw *ah; /* Atheros HW */
185 185
186 struct ieee80211_supported_band *curband;
187
188#ifdef CONFIG_ATH5K_DEBUG 186#ifdef CONFIG_ATH5K_DEBUG
189 struct ath5k_dbg_info debug; /* debug info */ 187 struct ath5k_dbg_info debug; /* debug info */
190#endif /* CONFIG_ATH5K_DEBUG */ 188#endif /* CONFIG_ATH5K_DEBUG */
@@ -202,7 +200,6 @@ struct ath5k_softc {
202#define ATH_STAT_STARTED 4 /* opened & irqs enabled */ 200#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
203 201
204 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 202 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
205 unsigned int curmode; /* current phy mode */
206 struct ieee80211_channel *curchan; /* current h/w channel */ 203 struct ieee80211_channel *curchan; /* current h/w channel */
207 204
208 u16 nvifs; 205 u16 nvifs;
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 31cad80e9b0..f77e8a703c5 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -32,23 +32,24 @@
32 */ 32 */
33int ath5k_hw_set_capabilities(struct ath5k_hw *ah) 33int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
34{ 34{
35 struct ath5k_capabilities *caps = &ah->ah_capabilities;
35 u16 ee_header; 36 u16 ee_header;
36 37
37 /* Capabilities stored in the EEPROM */ 38 /* Capabilities stored in the EEPROM */
38 ee_header = ah->ah_capabilities.cap_eeprom.ee_header; 39 ee_header = caps->cap_eeprom.ee_header;
39 40
40 if (ah->ah_version == AR5K_AR5210) { 41 if (ah->ah_version == AR5K_AR5210) {
41 /* 42 /*
42 * Set radio capabilities 43 * Set radio capabilities
43 * (The AR5110 only supports the middle 5GHz band) 44 * (The AR5110 only supports the middle 5GHz band)
44 */ 45 */
45 ah->ah_capabilities.cap_range.range_5ghz_min = 5120; 46 caps->cap_range.range_5ghz_min = 5120;
46 ah->ah_capabilities.cap_range.range_5ghz_max = 5430; 47 caps->cap_range.range_5ghz_max = 5430;
47 ah->ah_capabilities.cap_range.range_2ghz_min = 0; 48 caps->cap_range.range_2ghz_min = 0;
48 ah->ah_capabilities.cap_range.range_2ghz_max = 0; 49 caps->cap_range.range_2ghz_max = 0;
49 50
50 /* Set supported modes */ 51 /* Set supported modes */
51 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode); 52 __set_bit(AR5K_MODE_11A, caps->cap_mode);
52 } else { 53 } else {
53 /* 54 /*
54 * XXX The tranceiver supports frequencies from 4920 to 6100GHz 55 * XXX The tranceiver supports frequencies from 4920 to 6100GHz
@@ -56,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
56 * XXX current ieee80211 implementation because the IEEE 57 * XXX current ieee80211 implementation because the IEEE
57 * XXX channel mapping does not support negative channel 58 * XXX channel mapping does not support negative channel
58 * XXX numbers (2312MHz is channel -19). Of course, this 59 * XXX numbers (2312MHz is channel -19). Of course, this
59 * XXX doesn't matter because these channels are out of range 60 * XXX doesn't matter because these channels are out of the
60 * XXX but some regulation domains like MKK (Japan) will 61 * XXX legal range.
61 * XXX support frequencies somewhere around 4.8GHz.
62 */ 62 */
63 63
64 /* 64 /*
@@ -66,13 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
66 */ 66 */
67 67
68 if (AR5K_EEPROM_HDR_11A(ee_header)) { 68 if (AR5K_EEPROM_HDR_11A(ee_header)) {
69 /* 4920 */ 69 if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
70 ah->ah_capabilities.cap_range.range_5ghz_min = 5005; 70 caps->cap_range.range_5ghz_min = 4920;
71 ah->ah_capabilities.cap_range.range_5ghz_max = 6100; 71 else
72 caps->cap_range.range_5ghz_min = 5005;
73 caps->cap_range.range_5ghz_max = 6100;
72 74
73 /* Set supported modes */ 75 /* Set supported modes */
74 __set_bit(AR5K_MODE_11A, 76 __set_bit(AR5K_MODE_11A, caps->cap_mode);
75 ah->ah_capabilities.cap_mode);
76 } 77 }
77 78
78 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is 79 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
@@ -81,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
81 (AR5K_EEPROM_HDR_11G(ee_header) && 82 (AR5K_EEPROM_HDR_11G(ee_header) &&
82 ah->ah_version != AR5K_AR5211)) { 83 ah->ah_version != AR5K_AR5211)) {
83 /* 2312 */ 84 /* 2312 */
84 ah->ah_capabilities.cap_range.range_2ghz_min = 2412; 85 caps->cap_range.range_2ghz_min = 2412;
85 ah->ah_capabilities.cap_range.range_2ghz_max = 2732; 86 caps->cap_range.range_2ghz_max = 2732;
86 87
87 if (AR5K_EEPROM_HDR_11B(ee_header)) 88 if (AR5K_EEPROM_HDR_11B(ee_header))
88 __set_bit(AR5K_MODE_11B, 89 __set_bit(AR5K_MODE_11B, caps->cap_mode);
89 ah->ah_capabilities.cap_mode);
90 90
91 if (AR5K_EEPROM_HDR_11G(ee_header) && 91 if (AR5K_EEPROM_HDR_11G(ee_header) &&
92 ah->ah_version != AR5K_AR5211) 92 ah->ah_version != AR5K_AR5211)
93 __set_bit(AR5K_MODE_11G, 93 __set_bit(AR5K_MODE_11G, caps->cap_mode);
94 ah->ah_capabilities.cap_mode);
95 } 94 }
96 } 95 }
97 96
98 /* Set number of supported TX queues */ 97 /* Set number of supported TX queues */
99 if (ah->ah_version == AR5K_AR5210) 98 if (ah->ah_version == AR5K_AR5210)
100 ah->ah_capabilities.cap_queues.q_tx_num = 99 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
101 AR5K_NUM_TX_QUEUES_NOQCU;
102 else 100 else
103 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; 101 caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
104 102
105 /* newer hardware has PHY error counters */ 103 /* newer hardware has PHY error counters */
106 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A) 104 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
107 ah->ah_capabilities.cap_has_phyerr_counters = true; 105 caps->cap_has_phyerr_counters = true;
108 else 106 else
109 ah->ah_capabilities.cap_has_phyerr_counters = false; 107 caps->cap_has_phyerr_counters = false;
110 108
111 return 0; 109 return 0;
112} 110}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index d2f84d76bb0..0230f30e9e9 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -308,8 +308,6 @@ static const struct {
308 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, 308 { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
309 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" }, 309 { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
310 { ATH5K_DEBUG_LED, "led", "LED management" }, 310 { ATH5K_DEBUG_LED, "led", "LED management" },
311 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
312 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
313 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 311 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
314 { ATH5K_DEBUG_DMA, "dma", "dma start/stop" }, 312 { ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
315 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, 313 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
@@ -1036,24 +1034,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
1036} 1034}
1037 1035
1038void 1036void
1039ath5k_debug_dump_skb(struct ath5k_softc *sc,
1040 struct sk_buff *skb, const char *prefix, int tx)
1041{
1042 char buf[16];
1043
1044 if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) ||
1045 (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX)))))
1046 return;
1047
1048 snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix);
1049
1050 print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data,
1051 min(200U, skb->len));
1052
1053 printk(KERN_DEBUG "\n");
1054}
1055
1056void
1057ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) 1037ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
1058{ 1038{
1059 struct ath5k_desc *ds = bf->desc; 1039 struct ath5k_desc *ds = bf->desc;
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 3e34428d512..b0355aef68d 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -116,8 +116,6 @@ enum ath5k_debug_level {
116 ATH5K_DEBUG_CALIBRATE = 0x00000020, 116 ATH5K_DEBUG_CALIBRATE = 0x00000020,
117 ATH5K_DEBUG_TXPOWER = 0x00000040, 117 ATH5K_DEBUG_TXPOWER = 0x00000040,
118 ATH5K_DEBUG_LED = 0x00000080, 118 ATH5K_DEBUG_LED = 0x00000080,
119 ATH5K_DEBUG_DUMP_RX = 0x00000100,
120 ATH5K_DEBUG_DUMP_TX = 0x00000200,
121 ATH5K_DEBUG_DUMPBANDS = 0x00000400, 119 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
122 ATH5K_DEBUG_DMA = 0x00000800, 120 ATH5K_DEBUG_DMA = 0x00000800,
123 ATH5K_DEBUG_ANI = 0x00002000, 121 ATH5K_DEBUG_ANI = 0x00002000,
@@ -152,10 +150,6 @@ void
152ath5k_debug_dump_bands(struct ath5k_softc *sc); 150ath5k_debug_dump_bands(struct ath5k_softc *sc);
153 151
154void 152void
155ath5k_debug_dump_skb(struct ath5k_softc *sc,
156 struct sk_buff *skb, const char *prefix, int tx);
157
158void
159ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf); 153ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
160 154
161#else /* no debugging */ 155#else /* no debugging */
@@ -182,10 +176,6 @@ static inline void
182ath5k_debug_dump_bands(struct ath5k_softc *sc) {} 176ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
183 177
184static inline void 178static inline void
185ath5k_debug_dump_skb(struct ath5k_softc *sc,
186 struct sk_buff *skb, const char *prefix, int tx) {}
187
188static inline void
189ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {} 179ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
190 180
191#endif /* ifdef CONFIG_ATH5K_DEBUG */ 181#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 80e625608ba..b6561f785c6 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -72,7 +72,6 @@ static int
72ath5k_eeprom_init_header(struct ath5k_hw *ah) 72ath5k_eeprom_init_header(struct ath5k_hw *ah)
73{ 73{
74 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 74 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
75 int ret;
76 u16 val; 75 u16 val;
77 u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; 76 u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
78 77
@@ -192,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
192 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 191 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
193 u32 o = *offset; 192 u32 o = *offset;
194 u16 val; 193 u16 val;
195 int ret, i = 0; 194 int i = 0;
196 195
197 AR5K_EEPROM_READ(o++, val); 196 AR5K_EEPROM_READ(o++, val);
198 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f; 197 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
@@ -252,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
252 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 251 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
253 u32 o = *offset; 252 u32 o = *offset;
254 u16 val; 253 u16 val;
255 int ret;
256 254
257 ee->ee_n_piers[mode] = 0; 255 ee->ee_n_piers[mode] = 0;
258 AR5K_EEPROM_READ(o++, val); 256 AR5K_EEPROM_READ(o++, val);
@@ -515,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
515 int o = *offset; 513 int o = *offset;
516 int i = 0; 514 int i = 0;
517 u8 freq1, freq2; 515 u8 freq1, freq2;
518 int ret;
519 u16 val; 516 u16 val;
520 517
521 ee->ee_n_piers[mode] = 0; 518 ee->ee_n_piers[mode] = 0;
@@ -551,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
551{ 548{
552 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 549 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
553 struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a; 550 struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
554 int i, ret; 551 int i;
555 u16 val; 552 u16 val;
556 u8 mask; 553 u8 mask;
557 554
@@ -970,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
970 u32 offset; 967 u32 offset;
971 u8 i, c; 968 u8 i, c;
972 u16 val; 969 u16 val;
973 int ret;
974 u8 pd_gains = 0; 970 u8 pd_gains = 0;
975 971
976 /* Count how many curves we have and 972 /* Count how many curves we have and
@@ -1228,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
1228 struct ath5k_chan_pcal_info *chinfo; 1224 struct ath5k_chan_pcal_info *chinfo;
1229 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; 1225 u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
1230 u32 offset; 1226 u32 offset;
1231 int idx, i, ret; 1227 int idx, i;
1232 u16 val; 1228 u16 val;
1233 u8 pd_gains = 0; 1229 u8 pd_gains = 0;
1234 1230
@@ -1419,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1419 u8 *rate_target_pwr_num; 1415 u8 *rate_target_pwr_num;
1420 u32 offset; 1416 u32 offset;
1421 u16 val; 1417 u16 val;
1422 int ret, i; 1418 int i;
1423 1419
1424 offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1); 1420 offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
1425 rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode]; 1421 rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
@@ -1593,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
1593 struct ath5k_edge_power *rep; 1589 struct ath5k_edge_power *rep;
1594 unsigned int fmask, pmask; 1590 unsigned int fmask, pmask;
1595 unsigned int ctl_mode; 1591 unsigned int ctl_mode;
1596 int ret, i, j; 1592 int i, j;
1597 u32 offset; 1593 u32 offset;
1598 u16 val; 1594 u16 val;
1599 1595
@@ -1733,16 +1729,12 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
1733 u8 mac_d[ETH_ALEN] = {}; 1729 u8 mac_d[ETH_ALEN] = {};
1734 u32 total, offset; 1730 u32 total, offset;
1735 u16 data; 1731 u16 data;
1736 int octet, ret; 1732 int octet;
1737 1733
1738 ret = ath5k_hw_nvram_read(ah, 0x20, &data); 1734 AR5K_EEPROM_READ(0x20, data);
1739 if (ret)
1740 return ret;
1741 1735
1742 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) { 1736 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
1743 ret = ath5k_hw_nvram_read(ah, offset, &data); 1737 AR5K_EEPROM_READ(offset, data);
1744 if (ret)
1745 return ret;
1746 1738
1747 total += data; 1739 total += data;
1748 mac_d[octet + 1] = data & 0xff; 1740 mac_d[octet + 1] = data & 0xff;
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 7c09e150dbd..6511c27d938 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{
241#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250 241#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
242 242
243#define AR5K_EEPROM_READ(_o, _v) do { \ 243#define AR5K_EEPROM_READ(_o, _v) do { \
244 ret = ath5k_hw_nvram_read(ah, (_o), &(_v)); \ 244 if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \
245 if (ret) \ 245 return -EIO; \
246 return ret; \
247} while (0) 246} while (0)
248 247
249#define AR5K_EEPROM_READ_HDR(_o, _v) \ 248#define AR5K_EEPROM_READ_HDR(_o, _v) \
@@ -269,29 +268,6 @@ enum ath5k_ctl_mode {
269 AR5K_CTL_MODE_M = 15, 268 AR5K_CTL_MODE_M = 15,
270}; 269};
271 270
272/* Default CTL ids for the 3 main reg domains.
273 * Atheros only uses these by default but vendors
274 * can have up to 32 different CTLs for different
275 * scenarios. Note that theese values are ORed with
276 * the mode id (above) so we can have up to 24 CTL
277 * datasets out of these 3 main regdomains. That leaves
278 * 8 ids that can be used by vendors and since 0x20 is
279 * missing from HAL sources i guess this is the set of
280 * custom CTLs vendors can use. */
281#define AR5K_CTL_FCC 0x10
282#define AR5K_CTL_CUSTOM 0x20
283#define AR5K_CTL_ETSI 0x30
284#define AR5K_CTL_MKK 0x40
285
286/* Indicates a CTL with only mode set and
287 * no reg domain mapping, such CTLs are used
288 * for world roaming domains or simply when
289 * a reg domain is not set */
290#define AR5K_CTL_NO_REGDOMAIN 0xf0
291
292/* Indicates an empty (invalid) CTL */
293#define AR5K_CTL_NO_CTL 0xff
294
295/* Per channel calibration data, used for power table setup */ 271/* Per channel calibration data, used for power table setup */
296struct ath5k_chan_pcal_info_rf5111 { 272struct ath5k_chan_pcal_info_rf5111 {
297 /* Power levels in half dbm units 273 /* Power levels in half dbm units
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d76d68c99f7..a60a726a140 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -48,23 +48,6 @@
48 48
49extern int ath5k_modparam_nohwcrypt; 49extern int ath5k_modparam_nohwcrypt;
50 50
51/* functions used from base.c */
52void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
53bool ath_any_vif_assoc(struct ath5k_softc *sc);
54int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
55 struct ath5k_txq *txq);
56int ath5k_init_hw(struct ath5k_softc *sc);
57int ath5k_stop_hw(struct ath5k_softc *sc);
58void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
59void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
60 struct ieee80211_vif *vif);
61int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
62void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
63int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
64void ath5k_beacon_config(struct ath5k_softc *sc);
65void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
66void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
67
68/********************\ 51/********************\
69* Mac80211 functions * 52* Mac80211 functions *
70\********************/ 53\********************/
@@ -226,6 +209,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
226 struct ath5k_hw *ah = sc->ah; 209 struct ath5k_hw *ah = sc->ah;
227 struct ieee80211_conf *conf = &hw->conf; 210 struct ieee80211_conf *conf = &hw->conf;
228 int ret = 0; 211 int ret = 0;
212 int i;
229 213
230 mutex_lock(&sc->lock); 214 mutex_lock(&sc->lock);
231 215
@@ -243,6 +227,14 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
243 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); 227 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
244 } 228 }
245 229
230 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
231 ah->ah_retry_long = conf->long_frame_max_tx_count;
232 ah->ah_retry_short = conf->short_frame_max_tx_count;
233
234 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
235 ath5k_hw_set_tx_retry_limits(ah, i);
236 }
237
246 /* TODO: 238 /* TODO:
247 * 1) Move this on config_interface and handle each case 239 * 1) Move this on config_interface and handle each case
248 * separately eg. when we have only one STA vif, use 240 * separately eg. when we have only one STA vif, use
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 7f8c5b0e9d2..66598a0d1df 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -69,7 +69,8 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
69/* 69/*
70 * Read from eeprom 70 * Read from eeprom
71 */ 71 */
72bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) 72static bool
73ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
73{ 74{
74 struct ath5k_hw *ah = (struct ath5k_hw *) common->ah; 75 struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
75 u32 status, timeout; 76 u32 status, timeout;
@@ -90,15 +91,15 @@ bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
90 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS); 91 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
91 if (status & AR5K_EEPROM_STAT_RDDONE) { 92 if (status & AR5K_EEPROM_STAT_RDDONE) {
92 if (status & AR5K_EEPROM_STAT_RDERR) 93 if (status & AR5K_EEPROM_STAT_RDERR)
93 return -EIO; 94 return false;
94 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) & 95 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
95 0xffff); 96 0xffff);
96 return 0; 97 return true;
97 } 98 }
98 udelay(15); 99 udelay(15);
99 } 100 }
100 101
101 return -ETIMEDOUT; 102 return false;
102} 103}
103 104
104int ath5k_hw_read_srev(struct ath5k_hw *ah) 105int ath5k_hw_read_srev(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 2c9c9e793d4..3343fb9e494 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -228,24 +228,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
228/* 228/*
229 * Set tx retry limits on DCU 229 * Set tx retry limits on DCU
230 */ 230 */
231static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, 231void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
232 unsigned int queue) 232 unsigned int queue)
233{ 233{
234 u32 retry_lg, retry_sh;
235
236 /*
237 * Calculate and set retry limits
238 */
239 if (ah->ah_software_retry) {
240 /* XXX Need to test this */
241 retry_lg = ah->ah_limit_tx_retries;
242 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
243 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
244 } else {
245 retry_lg = AR5K_INIT_LG_RETRY;
246 retry_sh = AR5K_INIT_SH_RETRY;
247 }
248
249 /* Single data queue on AR5210 */ 234 /* Single data queue on AR5210 */
250 if (ah->ah_version == AR5K_AR5210) { 235 if (ah->ah_version == AR5K_AR5210) {
251 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; 236 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -255,25 +240,26 @@ static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
255 240
256 ath5k_hw_reg_write(ah, 241 ath5k_hw_reg_write(ah,
257 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) 242 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
258 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY, 243 | AR5K_REG_SM(ah->ah_retry_long,
259 AR5K_NODCU_RETRY_LMT_SLG_RETRY) 244 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
260 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY, 245 | AR5K_REG_SM(ah->ah_retry_short,
261 AR5K_NODCU_RETRY_LMT_SSH_RETRY) 246 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
262 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY) 247 | AR5K_REG_SM(ah->ah_retry_long,
263 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY), 248 AR5K_NODCU_RETRY_LMT_LG_RETRY)
249 | AR5K_REG_SM(ah->ah_retry_short,
250 AR5K_NODCU_RETRY_LMT_SH_RETRY),
264 AR5K_NODCU_RETRY_LMT); 251 AR5K_NODCU_RETRY_LMT);
265 /* DCU on AR5211+ */ 252 /* DCU on AR5211+ */
266 } else { 253 } else {
267 ath5k_hw_reg_write(ah, 254 ath5k_hw_reg_write(ah,
268 AR5K_REG_SM(AR5K_INIT_SLG_RETRY, 255 AR5K_REG_SM(ah->ah_retry_long,
269 AR5K_DCU_RETRY_LMT_SLG_RETRY) | 256 AR5K_DCU_RETRY_LMT_RTS)
270 AR5K_REG_SM(AR5K_INIT_SSH_RETRY, 257 | AR5K_REG_SM(ah->ah_retry_long,
271 AR5K_DCU_RETRY_LMT_SSH_RETRY) | 258 AR5K_DCU_RETRY_LMT_STA_RTS)
272 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) | 259 | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
273 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY), 260 AR5K_DCU_RETRY_LMT_STA_DATA),
274 AR5K_QUEUE_DFS_RETRY_LIMIT(queue)); 261 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
275 } 262 }
276 return;
277} 263}
278 264
279/** 265/**
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index fd14b910395..e1c9abd8c87 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -686,16 +686,15 @@
686 686
687/* 687/*
688 * DCU retry limit registers 688 * DCU retry limit registers
689 * all these fields don't allow zero values
689 */ 690 */
690#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */ 691#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */
691#define AR5K_DCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */ 692#define AR5K_DCU_RETRY_LMT_RTS 0x0000000f /* RTS failure limit. Transmission fails if no CTS is received for this number of times */
692#define AR5K_DCU_RETRY_LMT_SH_RETRY_S 0 693#define AR5K_DCU_RETRY_LMT_RTS_S 0
693#define AR5K_DCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry limit mask */ 694#define AR5K_DCU_RETRY_LMT_STA_RTS 0x00003f00 /* STA RTS failure limit. If exceeded CW reset */
694#define AR5K_DCU_RETRY_LMT_LG_RETRY_S 4 695#define AR5K_DCU_RETRY_LMT_STA_RTS_S 8
695#define AR5K_DCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask (?) */ 696#define AR5K_DCU_RETRY_LMT_STA_DATA 0x000fc000 /* STA data failure limit. If exceeded CW reset. */
696#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8 697#define AR5K_DCU_RETRY_LMT_STA_DATA_S 14
697#define AR5K_DCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask (?) */
698#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14
699#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q) 698#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
700 699
701/* 700/*
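The renamed DCU retry-limit fields are filled from ah_retry_short/ah_retry_long in the qcu.c hunk above. Expanded for reference, assuming AR5K_REG_SM() is ath5k's usual "(value << FIELD_S) & FIELD" shift-and-mask helper (the wrapper function is illustrative):

/* Sketch: how the renamed fields get packed for the DCU case in
 * ath5k_hw_set_tx_retry_limits(). */
static void demo_write_dcu_retry_limits(struct ath5k_hw *ah,
					unsigned int queue)
{
	u32 lmt;

	lmt  = (ah->ah_retry_long << AR5K_DCU_RETRY_LMT_RTS_S) &
						AR5K_DCU_RETRY_LMT_RTS;
	lmt |= (ah->ah_retry_long << AR5K_DCU_RETRY_LMT_STA_RTS_S) &
						AR5K_DCU_RETRY_LMT_STA_RTS;
	lmt |= (max(ah->ah_retry_long, ah->ah_retry_short)
			<< AR5K_DCU_RETRY_LMT_STA_DATA_S) &
						AR5K_DCU_RETRY_LMT_STA_DATA;

	ath5k_hw_reg_write(ah, lmt, AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
}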
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644
index 00000000000..2de68adb624
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -0,0 +1,107 @@
1#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
2#define __TRACE_ATH5K_H
3
4#include <linux/tracepoint.h>
5#include "base.h"
6
7#ifndef CONFIG_ATH5K_TRACER
8#undef TRACE_EVENT
9#define TRACE_EVENT(name, proto, ...) \
10static inline void trace_ ## name(proto) {}
11#endif
12
13struct sk_buff;
14
15#define PRIV_ENTRY __field(struct ath5k_softc *, priv)
16#define PRIV_ASSIGN __entry->priv = priv
17
18#undef TRACE_SYSTEM
19#define TRACE_SYSTEM ath5k
20
21TRACE_EVENT(ath5k_rx,
22 TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
23 TP_ARGS(priv, skb),
24 TP_STRUCT__entry(
25 PRIV_ENTRY
26 __field(unsigned long, skbaddr)
27 __dynamic_array(u8, frame, skb->len)
28 ),
29 TP_fast_assign(
30 PRIV_ASSIGN;
31 __entry->skbaddr = (unsigned long) skb;
32 memcpy(__get_dynamic_array(frame), skb->data, skb->len);
33 ),
34 TP_printk(
35 "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
36 )
37);
38
39TRACE_EVENT(ath5k_tx,
40 TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
41 struct ath5k_txq *q),
42
43 TP_ARGS(priv, skb, q),
44
45 TP_STRUCT__entry(
46 PRIV_ENTRY
47 __field(unsigned long, skbaddr)
48 __field(u8, qnum)
49 __dynamic_array(u8, frame, skb->len)
50 ),
51
52 TP_fast_assign(
53 PRIV_ASSIGN;
54 __entry->skbaddr = (unsigned long) skb;
55 __entry->qnum = (u8) q->qnum;
56 memcpy(__get_dynamic_array(frame), skb->data, skb->len);
57 ),
58
59 TP_printk(
60 "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
61 __entry->qnum
62 )
63);
64
65TRACE_EVENT(ath5k_tx_complete,
66 TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
67 struct ath5k_txq *q, struct ath5k_tx_status *ts),
68
69 TP_ARGS(priv, skb, q, ts),
70
71 TP_STRUCT__entry(
72 PRIV_ENTRY
73 __field(unsigned long, skbaddr)
74 __field(u8, qnum)
75 __field(u8, ts_status)
76 __field(s8, ts_rssi)
77 __field(u8, ts_antenna)
78 ),
79
80 TP_fast_assign(
81 PRIV_ASSIGN;
82 __entry->skbaddr = (unsigned long) skb;
83 __entry->qnum = (u8) q->qnum;
84 __entry->ts_status = ts->ts_status;
85 __entry->ts_rssi = ts->ts_rssi;
86 __entry->ts_antenna = ts->ts_antenna;
87 ),
88
89 TP_printk(
90 "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
91 __entry->priv, __entry->skbaddr, __entry->qnum,
92 __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
93 )
94);
95
96#endif /* __TRACE_ATH5K_H */
97
98#ifdef CONFIG_ATH5K_TRACER
99
100#undef TRACE_INCLUDE_PATH
101#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
102#undef TRACE_INCLUDE_FILE
103#define TRACE_INCLUDE_FILE trace
104
105#include <trace/define_trace.h>
106
107#endif
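When CONFIG_ATH5K_TRACER is not set, the TRACE_EVENT() override at the top of this header turns every event into an empty inline stub, so the trace_ath5k_rx()/trace_ath5k_tx() calls added in base.c compile away without any #ifdefs. Roughly what the preprocessor produces in that case:

/* Sketch of the !CONFIG_ATH5K_TRACER expansion: each tracepoint call
 * becomes a no-op static inline. */
static inline void trace_ath5k_rx(struct ath5k_softc *priv,
				  struct sk_buff *skb) {}
static inline void trace_ath5k_tx(struct ath5k_softc *priv,
				  struct sk_buff *skb,
				  struct ath5k_txq *q) {}

With the tracer enabled, the events belong to the "ath5k" trace system and can typically be toggled at runtime through the kernel tracing interface (events/ath5k/enable under the tracing debugfs mount).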
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index aca01621c20..4d66ca8042e 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,7 +4,6 @@ ath9k-y += beacon.o \
4 main.o \ 4 main.o \
5 recv.o \ 5 recv.o \
6 xmit.o \ 6 xmit.o \
7 virtual.o \
8 7
9ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o 8ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
10ath9k-$(CONFIG_PCI) += pci.o 9ath9k-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 25a6e4417cd..99367210596 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops = {
54static int ath_ahb_probe(struct platform_device *pdev) 54static int ath_ahb_probe(struct platform_device *pdev)
55{ 55{
56 void __iomem *mem; 56 void __iomem *mem;
57 struct ath_wiphy *aphy;
58 struct ath_softc *sc; 57 struct ath_softc *sc;
59 struct ieee80211_hw *hw; 58 struct ieee80211_hw *hw;
60 struct resource *res; 59 struct resource *res;
@@ -92,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
92 91
93 irq = res->start; 92 irq = res->start;
94 93
95 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + 94 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
96 sizeof(struct ath_softc), &ath9k_ops);
97 if (hw == NULL) { 95 if (hw == NULL) {
98 dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); 96 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
99 ret = -ENOMEM; 97 ret = -ENOMEM;
@@ -103,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
103 SET_IEEE80211_DEV(hw, &pdev->dev); 101 SET_IEEE80211_DEV(hw, &pdev->dev);
104 platform_set_drvdata(pdev, hw); 102 platform_set_drvdata(pdev, hw);
105 103
106 aphy = hw->priv; 104 sc = hw->priv;
107 sc = (struct ath_softc *) (aphy + 1);
108 aphy->sc = sc;
109 aphy->hw = hw;
110 sc->pri_wiphy = aphy;
111 sc->hw = hw; 105 sc->hw = hw;
112 sc->dev = &pdev->dev; 106 sc->dev = &pdev->dev;
113 sc->mem = mem; 107 sc->mem = mem;
@@ -151,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
151 struct ieee80211_hw *hw = platform_get_drvdata(pdev); 145 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
152 146
153 if (hw) { 147 if (hw) {
154 struct ath_wiphy *aphy = hw->priv; 148 struct ath_softc *sc = hw->priv;
155 struct ath_softc *sc = aphy->sc;
156 void __iomem *mem = sc->mem; 149 void __iomem *mem = sc->mem;
157 150
158 ath9k_deinit_device(sc); 151 ath9k_deinit_device(sc);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 5e300bd3d26..76388c6d669 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -805,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
805{ 805{
806 struct ath_common *common = ath9k_hw_common(ah); 806 struct ath_common *common = ath9k_hw_common(ah);
807 807
808 if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) { 808 if (AR_SREV_9271(ah)) {
809 if (!ar9285_hw_cl_cal(ah, chan))
810 return false;
811 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
809 if (!ar9285_hw_clc(ah, chan)) 812 if (!ar9285_hw_clc(ah, chan))
810 return false; 813 return false;
811 } else { 814 } else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 4819747fa4c..4a927180299 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3673,7 +3673,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3673 return; 3673 return;
3674 3674
3675 reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) | 3675 reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
3676 (7 << 14) | (6 << 17) | (1 << 20) | 3676 (2 << 14) | (6 << 17) | (1 << 20) |
3677 (3 << 24) | (1 << 28); 3677 (3 << 24) | (1 << 28);
3678 3678
3679 REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set); 3679 REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
@@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
3959{ 3959{
3960#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) 3960#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
3961 /* make sure forced gain is not set */ 3961 /* make sure forced gain is not set */
3962 REG_WRITE(ah, 0xa458, 0); 3962 REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
3963 3963
3964 /* Write the OFDM power per rate set */ 3964 /* Write the OFDM power per rate set */
3965 3965
3966 /* 6 (LSB), 9, 12, 18 (MSB) */ 3966 /* 6 (LSB), 9, 12, 18 (MSB) */
3967 REG_WRITE(ah, 0xa3c0, 3967 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
3968 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) | 3968 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
3969 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) | 3969 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
3970 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) | 3970 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
3971 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0)); 3971 POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
3972 3972
3973 /* 24 (LSB), 36, 48, 54 (MSB) */ 3973 /* 24 (LSB), 36, 48, 54 (MSB) */
3974 REG_WRITE(ah, 0xa3c4, 3974 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
3975 POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) | 3975 POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
3976 POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) | 3976 POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
3977 POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) | 3977 POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
@@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
3980 /* Write the CCK power per rate set */ 3980 /* Write the CCK power per rate set */
3981 3981
3982 /* 1L (LSB), reserved, 2L, 2S (MSB) */ 3982 /* 1L (LSB), reserved, 2L, 2S (MSB) */
3983 REG_WRITE(ah, 0xa3c8, 3983 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
3984 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) | 3984 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
3985 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) | 3985 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
3986 /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */ 3986 /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
3987 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)); 3987 POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
3988 3988
3989 /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */ 3989 /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
3990 REG_WRITE(ah, 0xa3cc, 3990 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
3991 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) | 3991 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
3992 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) | 3992 POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
3993 POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) | 3993 POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
@@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
3997 /* Write the HT20 power per rate set */ 3997 /* Write the HT20 power per rate set */
3998 3998
3999 /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */ 3999 /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
4000 REG_WRITE(ah, 0xa3d0, 4000 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
4001 POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) | 4001 POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
4002 POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) | 4002 POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
4003 POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) | 4003 POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
@@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4005 ); 4005 );
4006 4006
4007 /* 6 (LSB), 7, 12, 13 (MSB) */ 4007 /* 6 (LSB), 7, 12, 13 (MSB) */
4008 REG_WRITE(ah, 0xa3d4, 4008 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
4009 POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) | 4009 POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
4010 POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) | 4010 POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
4011 POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) | 4011 POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
@@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4013 ); 4013 );
4014 4014
4015 /* 14 (LSB), 15, 20, 21 */ 4015 /* 14 (LSB), 15, 20, 21 */
4016 REG_WRITE(ah, 0xa3e4, 4016 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
4017 POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) | 4017 POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
4018 POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) | 4018 POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
4019 POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) | 4019 POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
@@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4023 /* Mixed HT20 and HT40 rates */ 4023 /* Mixed HT20 and HT40 rates */
4024 4024
4025 /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */ 4025 /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
4026 REG_WRITE(ah, 0xa3e8, 4026 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
4027 POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) | 4027 POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
4028 POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) | 4028 POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
4029 POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) | 4029 POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
@@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4035 * correct PAR difference between HT40 and HT20/LEGACY 4035 * correct PAR difference between HT40 and HT20/LEGACY
4036 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) 4036 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
4037 */ 4037 */
4038 REG_WRITE(ah, 0xa3d8, 4038 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
4039 POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) | 4039 POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
4040 POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) | 4040 POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
4041 POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) | 4041 POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
@@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4043 ); 4043 );
4044 4044
4045 /* 6 (LSB), 7, 12, 13 (MSB) */ 4045 /* 6 (LSB), 7, 12, 13 (MSB) */
4046 REG_WRITE(ah, 0xa3dc, 4046 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
4047 POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) | 4047 POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
4048 POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) | 4048 POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
4049 POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) | 4049 POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
@@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4051 ); 4051 );
4052 4052
4053 /* 14 (LSB), 15, 20, 21 */ 4053 /* 14 (LSB), 15, 20, 21 */
4054 REG_WRITE(ah, 0xa3ec, 4054 REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
4055 POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) | 4055 POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
4056 POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) | 4056 POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
4057 POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) | 4057 POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
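All of the writes in ar9003_hw_tx_power_regwrite() go through the POW_SM() helper defined at the top of the function, which masks each per-rate target power to 6 bits and shifts it into one of four byte lanes of a 32-bit word. A self-contained sketch of that packing, using made-up power values in place of the real pPwrArray entries:

#include <stdio.h>

/* Same helper as in ar9003_hw_tx_power_regwrite(): keep 6 bits, shift into place. */
#define POW_SM(_r, _s)	(((_r) & 0x3f) << (_s))

int main(void)
{
	/* Hypothetical per-rate target powers; the values are made up. */
	unsigned char pwr_6_24 = 0x20, pwr_36 = 0x1e, pwr_48 = 0x1c, pwr_54 = 0x1a;

	/* Mirrors the AR_PHY_POWER_TX_RATE(1) write: 24 (LSB), 36, 48, 54 (MSB). */
	unsigned int val = POW_SM(pwr_54, 24) |
			   POW_SM(pwr_48, 16) |
			   POW_SM(pwr_36, 8) |
			   POW_SM(pwr_6_24, 0);

	printf("packed per-rate power word: 0x%08x\n", val);	/* 0x1a1c1e20 */
	return 0;
}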
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 06fb2c85053..6fa3c24af2d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -28,7 +28,67 @@
28 */ 28 */
29static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 29static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
30{ 30{
31 if (AR_SREV_9485(ah)) { 31 if (AR_SREV_9485_11(ah)) {
32 /* mac */
33 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
34 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
35 ar9485_1_1_mac_core,
36 ARRAY_SIZE(ar9485_1_1_mac_core), 2);
37 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
38 ar9485_1_1_mac_postamble,
39 ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
40
41 /* bb */
42 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
43 ARRAY_SIZE(ar9485_1_1), 2);
44 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
45 ar9485_1_1_baseband_core,
46 ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
47 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
48 ar9485_1_1_baseband_postamble,
49 ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
50
51 /* radio */
52 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
53 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
54 ar9485_1_1_radio_core,
55 ARRAY_SIZE(ar9485_1_1_radio_core), 2);
56 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
57 ar9485_1_1_radio_postamble,
58 ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
59
60 /* soc */
61 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
62 ar9485_1_1_soc_preamble,
63 ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
64 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
65 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
66
67 /* rx/tx gain */
68 INIT_INI_ARRAY(&ah->iniModesRxGain,
69 ar9485_common_rx_gain_1_1,
70 ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2);
71 INIT_INI_ARRAY(&ah->iniModesTxGain,
72 ar9485_modes_lowest_ob_db_tx_gain_1_1,
73 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
74 5);
75
76 /* Load PCIE SERDES settings from INI */
77
78 /* Awake Setting */
79
80 INIT_INI_ARRAY(&ah->iniPcieSerdes,
81 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1,
82 ARRAY_SIZE(ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1),
83 2);
84
85 /* Sleep Setting */
86
87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
88 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1,
89 ARRAY_SIZE(ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1),
90 2);
91 } else if (AR_SREV_9485(ah)) {
32 /* mac */ 92 /* mac */
33 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 93 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
34 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 94 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -85,8 +145,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
85 /* Sleep Setting */ 145 /* Sleep Setting */
86 146
87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 147 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
88 ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1, 148 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
89 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1), 149 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
90 2); 150 2);
91 } else { 151 } else {
92 /* mac */ 152 /* mac */
@@ -163,7 +223,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
163 switch (ar9003_hw_get_tx_gain_idx(ah)) { 223 switch (ar9003_hw_get_tx_gain_idx(ah)) {
164 case 0: 224 case 0:
165 default: 225 default:
166 if (AR_SREV_9485(ah)) 226 if (AR_SREV_9485_11(ah))
227 INIT_INI_ARRAY(&ah->iniModesTxGain,
228 ar9485_modes_lowest_ob_db_tx_gain_1_1,
229 ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
230 5);
231 else if (AR_SREV_9485(ah))
167 INIT_INI_ARRAY(&ah->iniModesTxGain, 232 INIT_INI_ARRAY(&ah->iniModesTxGain,
168 ar9485Modes_lowest_ob_db_tx_gain_1_0, 233 ar9485Modes_lowest_ob_db_tx_gain_1_0,
169 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 234 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
@@ -175,10 +240,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
175 5); 240 5);
176 break; 241 break;
177 case 1: 242 case 1:
178 if (AR_SREV_9485(ah)) 243 if (AR_SREV_9485_11(ah))
244 INIT_INI_ARRAY(&ah->iniModesTxGain,
245 ar9485Modes_high_ob_db_tx_gain_1_1,
246 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
247 5);
248 else if (AR_SREV_9485(ah))
179 INIT_INI_ARRAY(&ah->iniModesTxGain, 249 INIT_INI_ARRAY(&ah->iniModesTxGain,
180 ar9485Modes_high_ob_db_tx_gain_1_0, 250 ar9485Modes_high_ob_db_tx_gain_1_0,
181 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 251 ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0),
182 5); 252 5);
183 else 253 else
184 INIT_INI_ARRAY(&ah->iniModesTxGain, 254 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -187,10 +257,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
187 5); 257 5);
188 break; 258 break;
189 case 2: 259 case 2:
190 if (AR_SREV_9485(ah)) 260 if (AR_SREV_9485_11(ah))
261 INIT_INI_ARRAY(&ah->iniModesTxGain,
262 ar9485Modes_low_ob_db_tx_gain_1_1,
263 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
264 5);
265 else if (AR_SREV_9485(ah))
191 INIT_INI_ARRAY(&ah->iniModesTxGain, 266 INIT_INI_ARRAY(&ah->iniModesTxGain,
192 ar9485Modes_low_ob_db_tx_gain_1_0, 267 ar9485Modes_low_ob_db_tx_gain_1_0,
193 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), 268 ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0),
194 5); 269 5);
195 else 270 else
196 INIT_INI_ARRAY(&ah->iniModesTxGain, 271 INIT_INI_ARRAY(&ah->iniModesTxGain,
@@ -199,7 +274,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
199 5); 274 5);
200 break; 275 break;
201 case 3: 276 case 3:
202 if (AR_SREV_9485(ah)) 277 if (AR_SREV_9485_11(ah))
278 INIT_INI_ARRAY(&ah->iniModesTxGain,
279 ar9485Modes_high_power_tx_gain_1_1,
280 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
281 5);
282 else if (AR_SREV_9485(ah))
203 INIT_INI_ARRAY(&ah->iniModesTxGain, 283 INIT_INI_ARRAY(&ah->iniModesTxGain,
204 ar9485Modes_high_power_tx_gain_1_0, 284 ar9485Modes_high_power_tx_gain_1_0,
205 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0), 285 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
@@ -218,7 +298,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
218 switch (ar9003_hw_get_rx_gain_idx(ah)) { 298 switch (ar9003_hw_get_rx_gain_idx(ah)) {
219 case 0: 299 case 0:
220 default: 300 default:
221 if (AR_SREV_9485(ah)) 301 if (AR_SREV_9485_11(ah))
302 INIT_INI_ARRAY(&ah->iniModesRxGain,
303 ar9485_common_rx_gain_1_1,
304 ARRAY_SIZE(ar9485_common_rx_gain_1_1),
305 2);
306 else if (AR_SREV_9485(ah))
222 INIT_INI_ARRAY(&ah->iniModesRxGain, 307 INIT_INI_ARRAY(&ah->iniModesRxGain,
223 ar9485Common_rx_gain_1_0, 308 ar9485Common_rx_gain_1_0,
224 ARRAY_SIZE(ar9485Common_rx_gain_1_0), 309 ARRAY_SIZE(ar9485Common_rx_gain_1_0),
@@ -230,7 +315,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
230 2); 315 2);
231 break; 316 break;
232 case 1: 317 case 1:
233 if (AR_SREV_9485(ah)) 318 if (AR_SREV_9485_11(ah))
319 INIT_INI_ARRAY(&ah->iniModesRxGain,
320 ar9485Common_wo_xlna_rx_gain_1_1,
321 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
322 2);
323 else if (AR_SREV_9485(ah))
234 INIT_INI_ARRAY(&ah->iniModesRxGain, 324 INIT_INI_ARRAY(&ah->iniModesRxGain,
235 ar9485Common_wo_xlna_rx_gain_1_0, 325 ar9485Common_wo_xlna_rx_gain_1_0,
236 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0), 326 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
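Every hunk in this file follows the same shape: test the newer AR9485 1.1 revision first, fall back to the 1.0 tables, and leave the non-AR9485 default branch untouched. The standalone sketch below only illustrates that dispatch order; the enum, helper, and default-branch string are simplified stand-ins, not ath9k identifiers (the real default table lives in the unchanged else branch, which this diff does not show).

#include <stdio.h>

enum chip_rev { REV_OTHER, REV_9485_1_0, REV_9485_1_1 };

/* Sketch of the revision dispatch used by the tx/rx gain table selection. */
static const char *pick_lowest_ob_db_tx_gain(enum chip_rev rev)
{
	if (rev == REV_9485_1_1)		/* AR_SREV_9485_11(ah) */
		return "ar9485_modes_lowest_ob_db_tx_gain_1_1";
	else if (rev == REV_9485_1_0)		/* AR_SREV_9485(ah) */
		return "ar9485Modes_lowest_ob_db_tx_gain_1_0";
	else					/* unchanged default branch, not shown in this diff */
		return "default AR9300-family table";
}

int main(void)
{
	printf("%s\n", pick_lowest_ob_db_tx_gain(REV_9485_1_1));
	return 0;
}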
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 4ceddbbdfce..038a0cbfc6e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
615 */ 615 */
616 if (rxsp->status11 & AR_CRCErr) 616 if (rxsp->status11 & AR_CRCErr)
617 rxs->rs_status |= ATH9K_RXERR_CRC; 617 rxs->rs_status |= ATH9K_RXERR_CRC;
618 if (rxsp->status11 & AR_PHYErr) { 618 else if (rxsp->status11 & AR_PHYErr) {
619 phyerr = MS(rxsp->status11, AR_PHYErrCode); 619 phyerr = MS(rxsp->status11, AR_PHYErrCode);
620 /* 620 /*
621 * If we reach a point here where AR_PostDelimCRCErr is 621 * If we reach a point here where AR_PostDelimCRCErr is
@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
638 rxs->rs_phyerr = phyerr; 638 rxs->rs_phyerr = phyerr;
639 } 639 }
640 640
641 } 641 } else if (rxsp->status11 & AR_DecryptCRCErr)
642 if (rxsp->status11 & AR_DecryptCRCErr)
643 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 642 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
644 if (rxsp->status11 & AR_MichaelErr) 643 else if (rxsp->status11 & AR_MichaelErr)
645 rxs->rs_status |= ATH9K_RXERR_MIC; 644 rxs->rs_status |= ATH9K_RXERR_MIC;
645
646 if (rxsp->status11 & AR_KeyMiss) 646 if (rxsp->status11 & AR_KeyMiss)
647 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 647 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
648 } 648 }
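The net effect of this hunk is that the CRC, PHY, decrypt-CRC and Michael-MIC checks become an if/else-if chain (first match wins) instead of independent tests, while the key-miss check still runs unconditionally afterwards. A standalone sketch of that classification order, with simplified flag names standing in for the real descriptor bits and rs_status values:

#include <stdio.h>

/* Simplified stand-ins for the rx descriptor status bits and rs_status flags. */
#define ERR_CRC		0x01
#define ERR_PHY		0x02
#define ERR_DECRYPT_CRC	0x04
#define ERR_MICHAEL	0x08
#define ERR_KEYMISS	0x10

#define RX_CRC		0x01
#define RX_PHY		0x02
#define RX_DECRYPT	0x04
#define RX_MIC		0x08

static unsigned int classify(unsigned int status11)
{
	unsigned int rs_status = 0;

	/* First match wins, as in the reworked ath9k_hw_process_rxdesc_edma(). */
	if (status11 & ERR_CRC)
		rs_status |= RX_CRC;
	else if (status11 & ERR_PHY)
		rs_status |= RX_PHY;
	else if (status11 & ERR_DECRYPT_CRC)
		rs_status |= RX_DECRYPT;
	else if (status11 & ERR_MICHAEL)
		rs_status |= RX_MIC;

	/* Key miss is still reported independently of the chain above. */
	if (status11 & ERR_KEYMISS)
		rs_status |= RX_DECRYPT;

	return rs_status;
}

int main(void)
{
	/* CRC and decrypt-CRC set together now report only the CRC error. */
	printf("0x%02x\n", classify(ERR_CRC | ERR_DECRYPT_CRC));	/* 0x01 */
	return 0;
}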
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 59bab6bd8a7..8bdda2cf9dd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -486,6 +486,8 @@
486#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) 486#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
487#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0) 487#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
488 488
489#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
490
489#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0) 491#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
490#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4) 492#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
491 493
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 70de3d89a7b..eac4d8526fc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -940,4 +940,1145 @@ static const u32 ar9485_1_0_mac_core[][2] = {
940 {0x000083cc, 0x00000200}, 940 {0x000083cc, 0x00000200},
941 {0x000083d0, 0x000301ff}, 941 {0x000083d0, 0x000301ff},
942}; 942};
943
944static const u32 ar9485_1_1_mac_core[][2] = {
945 /* Addr allmodes */
946 {0x00000008, 0x00000000},
947 {0x00000030, 0x00020085},
948 {0x00000034, 0x00000005},
949 {0x00000040, 0x00000000},
950 {0x00000044, 0x00000000},
951 {0x00000048, 0x00000008},
952 {0x0000004c, 0x00000010},
953 {0x00000050, 0x00000000},
954 {0x00001040, 0x002ffc0f},
955 {0x00001044, 0x002ffc0f},
956 {0x00001048, 0x002ffc0f},
957 {0x0000104c, 0x002ffc0f},
958 {0x00001050, 0x002ffc0f},
959 {0x00001054, 0x002ffc0f},
960 {0x00001058, 0x002ffc0f},
961 {0x0000105c, 0x002ffc0f},
962 {0x00001060, 0x002ffc0f},
963 {0x00001064, 0x002ffc0f},
964 {0x000010f0, 0x00000100},
965 {0x00001270, 0x00000000},
966 {0x000012b0, 0x00000000},
967 {0x000012f0, 0x00000000},
968 {0x0000143c, 0x00000000},
969 {0x0000147c, 0x00000000},
970 {0x00008000, 0x00000000},
971 {0x00008004, 0x00000000},
972 {0x00008008, 0x00000000},
973 {0x0000800c, 0x00000000},
974 {0x00008018, 0x00000000},
975 {0x00008020, 0x00000000},
976 {0x00008038, 0x00000000},
977 {0x0000803c, 0x00000000},
978 {0x00008040, 0x00000000},
979 {0x00008044, 0x00000000},
980 {0x00008048, 0x00000000},
981 {0x0000804c, 0xffffffff},
982 {0x00008054, 0x00000000},
983 {0x00008058, 0x00000000},
984 {0x0000805c, 0x000fc78f},
985 {0x00008060, 0x0000000f},
986 {0x00008064, 0x00000000},
987 {0x00008070, 0x00000310},
988 {0x00008074, 0x00000020},
989 {0x00008078, 0x00000000},
990 {0x0000809c, 0x0000000f},
991 {0x000080a0, 0x00000000},
992 {0x000080a4, 0x02ff0000},
993 {0x000080a8, 0x0e070605},
994 {0x000080ac, 0x0000000d},
995 {0x000080b0, 0x00000000},
996 {0x000080b4, 0x00000000},
997 {0x000080b8, 0x00000000},
998 {0x000080bc, 0x00000000},
999 {0x000080c0, 0x2a800000},
1000 {0x000080c4, 0x06900168},
1001 {0x000080c8, 0x13881c22},
1002 {0x000080cc, 0x01f40000},
1003 {0x000080d0, 0x00252500},
1004 {0x000080d4, 0x00a00000},
1005 {0x000080d8, 0x00400000},
1006 {0x000080dc, 0x00000000},
1007 {0x000080e0, 0xffffffff},
1008 {0x000080e4, 0x0000ffff},
1009 {0x000080e8, 0x3f3f3f3f},
1010 {0x000080ec, 0x00000000},
1011 {0x000080f0, 0x00000000},
1012 {0x000080f4, 0x00000000},
1013 {0x000080fc, 0x00020000},
1014 {0x00008100, 0x00000000},
1015 {0x00008108, 0x00000052},
1016 {0x0000810c, 0x00000000},
1017 {0x00008110, 0x00000000},
1018 {0x00008114, 0x000007ff},
1019 {0x00008118, 0x000000aa},
1020 {0x0000811c, 0x00003210},
1021 {0x00008124, 0x00000000},
1022 {0x00008128, 0x00000000},
1023 {0x0000812c, 0x00000000},
1024 {0x00008130, 0x00000000},
1025 {0x00008134, 0x00000000},
1026 {0x00008138, 0x00000000},
1027 {0x0000813c, 0x0000ffff},
1028 {0x00008144, 0xffffffff},
1029 {0x00008168, 0x00000000},
1030 {0x0000816c, 0x00000000},
1031 {0x00008170, 0x18486200},
1032 {0x00008174, 0x33332210},
1033 {0x00008178, 0x00000000},
1034 {0x0000817c, 0x00020000},
1035 {0x000081c0, 0x00000000},
1036 {0x000081c4, 0x33332210},
1037 {0x000081d4, 0x00000000},
1038 {0x000081ec, 0x00000000},
1039 {0x000081f0, 0x00000000},
1040 {0x000081f4, 0x00000000},
1041 {0x000081f8, 0x00000000},
1042 {0x000081fc, 0x00000000},
1043 {0x00008240, 0x00100000},
1044 {0x00008244, 0x0010f400},
1045 {0x00008248, 0x00000800},
1046 {0x0000824c, 0x0001e800},
1047 {0x00008250, 0x00000000},
1048 {0x00008254, 0x00000000},
1049 {0x00008258, 0x00000000},
1050 {0x0000825c, 0x40000000},
1051 {0x00008260, 0x00080922},
1052 {0x00008264, 0x9ca00010},
1053 {0x00008268, 0xffffffff},
1054 {0x0000826c, 0x0000ffff},
1055 {0x00008270, 0x00000000},
1056 {0x00008274, 0x40000000},
1057 {0x00008278, 0x003e4180},
1058 {0x0000827c, 0x00000004},
1059 {0x00008284, 0x0000002c},
1060 {0x00008288, 0x0000002c},
1061 {0x0000828c, 0x000000ff},
1062 {0x00008294, 0x00000000},
1063 {0x00008298, 0x00000000},
1064 {0x0000829c, 0x00000000},
1065 {0x00008300, 0x00000140},
1066 {0x00008314, 0x00000000},
1067 {0x0000831c, 0x0000010d},
1068 {0x00008328, 0x00000000},
1069 {0x0000832c, 0x00000007},
1070 {0x00008330, 0x00000302},
1071 {0x00008334, 0x00000700},
1072 {0x00008338, 0x00ff0000},
1073 {0x0000833c, 0x02400000},
1074 {0x00008340, 0x000107ff},
1075 {0x00008344, 0xa248105b},
1076 {0x00008348, 0x008f0000},
1077 {0x0000835c, 0x00000000},
1078 {0x00008360, 0xffffffff},
1079 {0x00008364, 0xffffffff},
1080 {0x00008368, 0x00000000},
1081 {0x00008370, 0x00000000},
1082 {0x00008374, 0x000000ff},
1083 {0x00008378, 0x00000000},
1084 {0x0000837c, 0x00000000},
1085 {0x00008380, 0xffffffff},
1086 {0x00008384, 0xffffffff},
1087 {0x00008390, 0xffffffff},
1088 {0x00008394, 0xffffffff},
1089 {0x00008398, 0x00000000},
1090 {0x0000839c, 0x00000000},
1091 {0x000083a0, 0x00000000},
1092 {0x000083a4, 0x0000fa14},
1093 {0x000083a8, 0x000f0c00},
1094 {0x000083ac, 0x33332210},
1095 {0x000083b0, 0x33332210},
1096 {0x000083b4, 0x33332210},
1097 {0x000083b8, 0x33332210},
1098 {0x000083bc, 0x00000000},
1099 {0x000083c0, 0x00000000},
1100 {0x000083c4, 0x00000000},
1101 {0x000083c8, 0x00000000},
1102 {0x000083cc, 0x00000200},
1103 {0x000083d0, 0x000301ff},
1104};
1105
1106static const u32 ar9485_1_1_baseband_core[][2] = {
1107 /* Addr allmodes */
1108 {0x00009800, 0xafe68e30},
1109 {0x00009804, 0xfd14e000},
1110 {0x00009808, 0x9c0a8f6b},
1111 {0x0000980c, 0x04800000},
1112 {0x00009814, 0x9280c00a},
1113 {0x00009818, 0x00000000},
1114 {0x0000981c, 0x00020028},
1115 {0x00009834, 0x5f3ca3de},
1116 {0x00009838, 0x0108ecff},
1117 {0x0000983c, 0x14750600},
1118 {0x00009880, 0x201fff00},
1119 {0x00009884, 0x00001042},
1120 {0x000098a4, 0x00200400},
1121 {0x000098b0, 0x52440bbe},
1122 {0x000098d0, 0x004b6a8e},
1123 {0x000098d4, 0x00000820},
1124 {0x000098dc, 0x00000000},
1125 {0x000098f0, 0x00000000},
1126 {0x000098f4, 0x00000000},
1127 {0x00009c04, 0x00000000},
1128 {0x00009c08, 0x03200000},
1129 {0x00009c0c, 0x00000000},
1130 {0x00009c10, 0x00000000},
1131 {0x00009c14, 0x00046384},
1132 {0x00009c18, 0x05b6b440},
1133 {0x00009c1c, 0x00b6b440},
1134 {0x00009d00, 0xc080a333},
1135 {0x00009d04, 0x40206c10},
1136 {0x00009d08, 0x009c4060},
1137 {0x00009d0c, 0x1883800a},
1138 {0x00009d10, 0x01834061},
1139 {0x00009d14, 0x00c00400},
1140 {0x00009d18, 0x00000000},
1141 {0x00009d1c, 0x00000000},
1142 {0x00009e08, 0x0038233c},
1143 {0x00009e24, 0x9927b515},
1144 {0x00009e28, 0x12ef0200},
1145 {0x00009e30, 0x06336f77},
1146 {0x00009e34, 0x6af6532f},
1147 {0x00009e38, 0x0cc80c00},
1148 {0x00009e40, 0x0d261820},
1149 {0x00009e4c, 0x00001004},
1150 {0x00009e50, 0x00ff03f1},
1151 {0x00009fc0, 0x80be4788},
1152 {0x00009fc4, 0x0001efb5},
1153 {0x00009fcc, 0x40000014},
1154 {0x0000a20c, 0x00000000},
1155 {0x0000a210, 0x00000000},
1156 {0x0000a220, 0x00000000},
1157 {0x0000a224, 0x00000000},
1158 {0x0000a228, 0x10002310},
1159 {0x0000a23c, 0x00000000},
1160 {0x0000a244, 0x0c000000},
1161 {0x0000a2a0, 0x00000001},
1162 {0x0000a2c0, 0x00000001},
1163 {0x0000a2c8, 0x00000000},
1164 {0x0000a2cc, 0x18c43433},
1165 {0x0000a2d4, 0x00000000},
1166 {0x0000a2dc, 0x00000000},
1167 {0x0000a2e0, 0x00000000},
1168 {0x0000a2e4, 0x00000000},
1169 {0x0000a2e8, 0x00000000},
1170 {0x0000a2ec, 0x00000000},
1171 {0x0000a2f0, 0x00000000},
1172 {0x0000a2f4, 0x00000000},
1173 {0x0000a2f8, 0x00000000},
1174 {0x0000a344, 0x00000000},
1175 {0x0000a34c, 0x00000000},
1176 {0x0000a350, 0x0000a000},
1177 {0x0000a364, 0x00000000},
1178 {0x0000a370, 0x00000000},
1179 {0x0000a390, 0x00000001},
1180 {0x0000a394, 0x00000444},
1181 {0x0000a398, 0x001f0e0f},
1182 {0x0000a39c, 0x0075393f},
1183 {0x0000a3a0, 0xb79f6427},
1184 {0x0000a3a4, 0x000000ff},
1185 {0x0000a3a8, 0x3b3b3b3b},
1186 {0x0000a3ac, 0x2f2f2f2f},
1187 {0x0000a3c0, 0x20202020},
1188 {0x0000a3c4, 0x22222220},
1189 {0x0000a3c8, 0x20200020},
1190 {0x0000a3cc, 0x20202020},
1191 {0x0000a3d0, 0x20202020},
1192 {0x0000a3d4, 0x20202020},
1193 {0x0000a3d8, 0x20202020},
1194 {0x0000a3dc, 0x20202020},
1195 {0x0000a3e0, 0x20202020},
1196 {0x0000a3e4, 0x20202020},
1197 {0x0000a3e8, 0x20202020},
1198 {0x0000a3ec, 0x20202020},
1199 {0x0000a3f0, 0x00000000},
1200 {0x0000a3f4, 0x00000006},
1201 {0x0000a3f8, 0x0cdbd380},
1202 {0x0000a3fc, 0x000f0f01},
1203 {0x0000a400, 0x8fa91f01},
1204 {0x0000a404, 0x00000000},
1205 {0x0000a408, 0x0e79e5c6},
1206 {0x0000a40c, 0x00820820},
1207 {0x0000a414, 0x1ce739cf},
1208 {0x0000a418, 0x2d0019ce},
1209 {0x0000a41c, 0x1ce739ce},
1210 {0x0000a420, 0x000001ce},
1211 {0x0000a424, 0x1ce739ce},
1212 {0x0000a428, 0x000001ce},
1213 {0x0000a42c, 0x1ce739ce},
1214 {0x0000a430, 0x1ce739ce},
1215 {0x0000a434, 0x00000000},
1216 {0x0000a438, 0x00001801},
1217 {0x0000a43c, 0x00000000},
1218 {0x0000a440, 0x00000000},
1219 {0x0000a444, 0x00000000},
1220 {0x0000a448, 0x04000000},
1221 {0x0000a44c, 0x00000001},
1222 {0x0000a450, 0x00010000},
1223 {0x0000a5c4, 0xbfad9d74},
1224 {0x0000a5c8, 0x0048060a},
1225 {0x0000a5cc, 0x00000637},
1226 {0x0000a760, 0x03020100},
1227 {0x0000a764, 0x09080504},
1228 {0x0000a768, 0x0d0c0b0a},
1229 {0x0000a76c, 0x13121110},
1230 {0x0000a770, 0x31301514},
1231 {0x0000a774, 0x35343332},
1232 {0x0000a778, 0x00000036},
1233 {0x0000a780, 0x00000838},
1234 {0x0000a7c0, 0x00000000},
1235 {0x0000a7c4, 0xfffffffc},
1236 {0x0000a7c8, 0x00000000},
1237 {0x0000a7cc, 0x00000000},
1238 {0x0000a7d0, 0x00000000},
1239 {0x0000a7d4, 0x00000004},
1240 {0x0000a7dc, 0x00000000},
1241};
1242
1243static const u32 ar9485Common_1_1[][2] = {
1244 /* Addr allmodes */
1245 {0x00007010, 0x00000022},
1246 {0x00007020, 0x00000000},
1247 {0x00007034, 0x00000002},
1248 {0x00007038, 0x000004c2},
1249};
1250
1251static const u32 ar9485_1_1_baseband_postamble[][5] = {
1252 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1253 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
1254 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
1255 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
1256 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
1257 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
1258 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
1259 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
1260 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
1261 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
1262 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
1263 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
1264 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
1265 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1266 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
1267 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
1268 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
1269 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
1270 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
1271 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
1272 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
1273 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
1274 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
1275 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
1276 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
1277 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
1278 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
1279 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
1280 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
1281 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
1282 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
1283 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
1284 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
1285 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
1286 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1287 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1288 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
1289 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
1290 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
1291 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1292 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
1293 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1294};
1295
1296static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
1297 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1298 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1299 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1300 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1301 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1302 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1303 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1304 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1305 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1306 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1307 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1308 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1309 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1310 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1311 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1312 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1313 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1314 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1315 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1316 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1317 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1318 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1319 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1320 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1321 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1322 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1323 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1324 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1325 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1326 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1327 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1328 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1329 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1330 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1331 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1332 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1333 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1334 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1335 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1336 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1337 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1338 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1339 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1340 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1341 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1342 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1343 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1344 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1345 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1346 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1347 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1348 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1349 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1350 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1351 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1352 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1353 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1354 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1355 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1356 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1357 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1358 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1359 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1360 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1361 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1362 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1363 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1364 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1365 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1366 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1367};
1368
1369static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
1370 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1371 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1372 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1373 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1374 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1375 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1376 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1377 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1378 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1379 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1380 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1381 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1382 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1383 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1384 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1385 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1386 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1387 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1388 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1389 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1390 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1391 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1392 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1393 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1394 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1395 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1396 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1397 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1398 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1399 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1400 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1401 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1402 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1403 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1404 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1405 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1406 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1407 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1408 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1409 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1410 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1411 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1412 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1413 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1414 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1415 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1416 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1417 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1418 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1419 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1420 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1421 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1422 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1423 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1424 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1425 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1426 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1427 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1428 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1429 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1430 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1431 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1432 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1433 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1434 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1435 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1436 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1437 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1438 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1439 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1440};
1441
1442static const u32 ar9485_1_1_radio_postamble[][2] = {
1443 /* Addr allmodes */
1444 {0x0001609c, 0x0b283f31},
1445 {0x000160ac, 0x24611800},
1446 {0x000160b0, 0x03284f3e},
1447 {0x0001610c, 0x00170000},
1448 {0x00016140, 0x10804008},
1449};
1450
1451static const u32 ar9485_1_1_mac_postamble[][5] = {
1452 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1453 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
1454 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
1455 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
1456 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
1457 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
1458 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
1459 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
1460 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
1461};
1462
1463static const u32 ar9485_1_1_radio_core[][2] = {
1464 /* Addr allmodes */
1465 {0x00016000, 0x36db6db6},
1466 {0x00016004, 0x6db6db40},
1467 {0x00016008, 0x73800000},
1468 {0x0001600c, 0x00000000},
1469 {0x00016040, 0x7f80fff8},
1470 {0x0001604c, 0x000f0278},
1471 {0x00016050, 0x4db6db8c},
1472 {0x00016054, 0x6db60000},
1473 {0x00016080, 0x00080000},
1474 {0x00016084, 0x0e48048c},
1475 {0x00016088, 0x14214514},
1476 {0x0001608c, 0x119f081e},
1477 {0x00016090, 0x24926490},
1478 {0x00016098, 0xd28b3330},
1479 {0x000160a0, 0xc2108ffe},
1480 {0x000160a4, 0x812fc370},
1481 {0x000160a8, 0x423c8000},
1482 {0x000160b4, 0x92480040},
1483 {0x000160c0, 0x006db6db},
1484 {0x000160c4, 0x0186db60},
1485 {0x000160c8, 0x6db6db6c},
1486 {0x000160cc, 0x6de6fbe0},
1487 {0x000160d0, 0xf7dfcf3c},
1488 {0x00016100, 0x04cb0001},
1489 {0x00016104, 0xfff80015},
1490 {0x00016108, 0x00080010},
1491 {0x00016144, 0x01884080},
1492 {0x00016148, 0x00008040},
1493 {0x00016240, 0x08400000},
1494 {0x00016244, 0x1bf90f00},
1495 {0x00016248, 0x00000000},
1496 {0x0001624c, 0x00000000},
1497 {0x00016280, 0x01000015},
1498 {0x00016284, 0x00d30000},
1499 {0x00016288, 0x00318000},
1500 {0x0001628c, 0x50000000},
1501 {0x00016290, 0x4b96210f},
1502 {0x00016380, 0x00000000},
1503 {0x00016384, 0x00000000},
1504 {0x00016388, 0x00800700},
1505 {0x0001638c, 0x00800700},
1506 {0x00016390, 0x00800700},
1507 {0x00016394, 0x00000000},
1508 {0x00016398, 0x00000000},
1509 {0x0001639c, 0x00000000},
1510 {0x000163a0, 0x00000001},
1511 {0x000163a4, 0x00000001},
1512 {0x000163a8, 0x00000000},
1513 {0x000163ac, 0x00000000},
1514 {0x000163b0, 0x00000000},
1515 {0x000163b4, 0x00000000},
1516 {0x000163b8, 0x00000000},
1517 {0x000163bc, 0x00000000},
1518 {0x000163c0, 0x000000a0},
1519 {0x000163c4, 0x000c0000},
1520 {0x000163c8, 0x14021402},
1521 {0x000163cc, 0x00001402},
1522 {0x000163d0, 0x00000000},
1523 {0x000163d4, 0x00000000},
1524 {0x00016c40, 0x13188278},
1525 {0x00016c44, 0x12000000},
1526};
1527
1528static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
1529 /* Addr allmodes */
1530 {0x00018c00, 0x10052e5e},
1531 {0x00018c04, 0x000801d8},
1532 {0x00018c08, 0x0000080c},
1533};
1534
1535static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
1536 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1537 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1538 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1539 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1540 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1541 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1542 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1543 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1544 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1545 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1546 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1547 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1548 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1549 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1550 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1551 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1552 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1553 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1554 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1555 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1556 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1557 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1558 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1559 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1560 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1561 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1562 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1563 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1564 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1565 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1566 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1567 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1568 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1569 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1570 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1571 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1572 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1573 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1574 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1575 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1576 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1577 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1578 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1579 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1580 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1581 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1582 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1583 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1584 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1585 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1586 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1587 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1588 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1589 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1590 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1591 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1592 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1593 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1594 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1595 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1596 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1597 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1598 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1599 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1600 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1601 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1602 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1603 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1604 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1605 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1606};
1607
1608static const u32 ar9485_1_1[][2] = {
1609 /* Addr allmodes */
1610 {0x0000a580, 0x00000000},
1611 {0x0000a584, 0x00000000},
1612 {0x0000a588, 0x00000000},
1613 {0x0000a58c, 0x00000000},
1614 {0x0000a590, 0x00000000},
1615 {0x0000a594, 0x00000000},
1616 {0x0000a598, 0x00000000},
1617 {0x0000a59c, 0x00000000},
1618 {0x0000a5a0, 0x00000000},
1619 {0x0000a5a4, 0x00000000},
1620 {0x0000a5a8, 0x00000000},
1621 {0x0000a5ac, 0x00000000},
1622 {0x0000a5b0, 0x00000000},
1623 {0x0000a5b4, 0x00000000},
1624 {0x0000a5b8, 0x00000000},
1625 {0x0000a5bc, 0x00000000},
1626};
1627
1628static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
1629 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1630 {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
1631 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1632 {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
1633 {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
1634 {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
1635 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
1636 {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
1637 {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
1638 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
1639 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
1640 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
1641 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
1642 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
1643 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
1644 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
1645 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
1646 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
1647 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1648 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1649 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1650 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1651 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1652 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1653 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1654 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1655 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1656 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1657 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1658 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1659 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1660 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1661 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1662 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1663 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1664 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1665 {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1666 {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1667 {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1668 {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1669 {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1670 {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1671 {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1672 {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1673 {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1674 {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1675 {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
1676 {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
1677 {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
1678 {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
1679 {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1680 {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1681 {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1682 {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1683 {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1684 {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1685 {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1686 {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1687 {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1688 {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1689 {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1690 {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1691 {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1692 {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1693 {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1694 {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1695 {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1696 {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
1697 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1698 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1699};
1700
1701static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
1702 /* Addr allmodes */
1703 {0x00018c00, 0x10013e5e},
1704 {0x00018c04, 0x000801d8},
1705 {0x00018c08, 0x0000080c},
1706};
1707
1708static const u32 ar9485_1_1_soc_preamble[][2] = {
1709 /* Addr allmodes */
1710 {0x00004014, 0xba280400},
1711 {0x000040a4, 0x00a0c9c9},
1712 {0x00007010, 0x00000022},
1713 {0x00007020, 0x00000000},
1714 {0x00007034, 0x00000002},
1715 {0x00007038, 0x000004c2},
1716 {0x00007048, 0x00000002},
1717};
1718
1719static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
1720 /* Addr allmodes */
1721 {0x0000a398, 0x00000000},
1722 {0x0000a39c, 0x6f7f0301},
1723 {0x0000a3a0, 0xca9228ee},
1724};
1725
1726static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
1727 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1728 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
1729 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
1730 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1731 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
1732 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
1733 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
1734 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
1735 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
1736 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
1737 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
1738 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
1739 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
1740 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
1741 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
1742 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
1743 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
1744 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
1745 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
1746 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
1747 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
1748 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
1749 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
1750 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
1751 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
1752 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
1753 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb},
1754 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
1755 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
1756 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
1757 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1758 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1759 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1760 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1761 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1762 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
1763 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1764 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1765 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1766 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1767 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1768 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1769 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1770 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1771 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1772 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1773 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1774 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1775 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1776 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1777 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1778 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1779 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1780 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1781 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1782 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1783 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1784 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1785 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1786 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1787 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1788 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1789 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1790 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1791 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1792 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1793 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1794 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1795 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
1796 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
1797};
1798
1799static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
1800 /* Addr 5G_HT2 5G_HT40 */
1801 {0x00009e00, 0x03721821, 0x03721821},
1802 {0x0000a230, 0x0000400b, 0x00004016},
1803 {0x0000a254, 0x00000898, 0x00001130},
1804};
1805
1806static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
1807 /* Addr allmodes */
1808 {0x00018c00, 0x10012e5e},
1809 {0x00018c04, 0x000801d8},
1810 {0x00018c08, 0x0000080c},
1811};
1812
1813static const u32 ar9485_common_rx_gain_1_1[][2] = {
1814 /* Addr allmodes */
1815 {0x0000a000, 0x00010000},
1816 {0x0000a004, 0x00030002},
1817 {0x0000a008, 0x00050004},
1818 {0x0000a00c, 0x00810080},
1819 {0x0000a010, 0x01800082},
1820 {0x0000a014, 0x01820181},
1821 {0x0000a018, 0x01840183},
1822 {0x0000a01c, 0x01880185},
1823 {0x0000a020, 0x018a0189},
1824 {0x0000a024, 0x02850284},
1825 {0x0000a028, 0x02890288},
1826 {0x0000a02c, 0x03850384},
1827 {0x0000a030, 0x03890388},
1828 {0x0000a034, 0x038b038a},
1829 {0x0000a038, 0x038d038c},
1830 {0x0000a03c, 0x03910390},
1831 {0x0000a040, 0x03930392},
1832 {0x0000a044, 0x03950394},
1833 {0x0000a048, 0x00000396},
1834 {0x0000a04c, 0x00000000},
1835 {0x0000a050, 0x00000000},
1836 {0x0000a054, 0x00000000},
1837 {0x0000a058, 0x00000000},
1838 {0x0000a05c, 0x00000000},
1839 {0x0000a060, 0x00000000},
1840 {0x0000a064, 0x00000000},
1841 {0x0000a068, 0x00000000},
1842 {0x0000a06c, 0x00000000},
1843 {0x0000a070, 0x00000000},
1844 {0x0000a074, 0x00000000},
1845 {0x0000a078, 0x00000000},
1846 {0x0000a07c, 0x00000000},
1847 {0x0000a080, 0x28282828},
1848 {0x0000a084, 0x28282828},
1849 {0x0000a088, 0x28282828},
1850 {0x0000a08c, 0x28282828},
1851 {0x0000a090, 0x28282828},
1852 {0x0000a094, 0x21212128},
1853 {0x0000a098, 0x171c1c1c},
1854 {0x0000a09c, 0x02020212},
1855 {0x0000a0a0, 0x00000202},
1856 {0x0000a0a4, 0x00000000},
1857 {0x0000a0a8, 0x00000000},
1858 {0x0000a0ac, 0x00000000},
1859 {0x0000a0b0, 0x00000000},
1860 {0x0000a0b4, 0x00000000},
1861 {0x0000a0b8, 0x00000000},
1862 {0x0000a0bc, 0x00000000},
1863 {0x0000a0c0, 0x001f0000},
1864 {0x0000a0c4, 0x111f1100},
1865 {0x0000a0c8, 0x111d111e},
1866 {0x0000a0cc, 0x111b111c},
1867 {0x0000a0d0, 0x22032204},
1868 {0x0000a0d4, 0x22012202},
1869 {0x0000a0d8, 0x221f2200},
1870 {0x0000a0dc, 0x221d221e},
1871 {0x0000a0e0, 0x33013302},
1872 {0x0000a0e4, 0x331f3300},
1873 {0x0000a0e8, 0x4402331e},
1874 {0x0000a0ec, 0x44004401},
1875 {0x0000a0f0, 0x441e441f},
1876 {0x0000a0f4, 0x55015502},
1877 {0x0000a0f8, 0x551f5500},
1878 {0x0000a0fc, 0x6602551e},
1879 {0x0000a100, 0x66006601},
1880 {0x0000a104, 0x661e661f},
1881 {0x0000a108, 0x7703661d},
1882 {0x0000a10c, 0x77017702},
1883 {0x0000a110, 0x00007700},
1884 {0x0000a114, 0x00000000},
1885 {0x0000a118, 0x00000000},
1886 {0x0000a11c, 0x00000000},
1887 {0x0000a120, 0x00000000},
1888 {0x0000a124, 0x00000000},
1889 {0x0000a128, 0x00000000},
1890 {0x0000a12c, 0x00000000},
1891 {0x0000a130, 0x00000000},
1892 {0x0000a134, 0x00000000},
1893 {0x0000a138, 0x00000000},
1894 {0x0000a13c, 0x00000000},
1895 {0x0000a140, 0x001f0000},
1896 {0x0000a144, 0x111f1100},
1897 {0x0000a148, 0x111d111e},
1898 {0x0000a14c, 0x111b111c},
1899 {0x0000a150, 0x22032204},
1900 {0x0000a154, 0x22012202},
1901 {0x0000a158, 0x221f2200},
1902 {0x0000a15c, 0x221d221e},
1903 {0x0000a160, 0x33013302},
1904 {0x0000a164, 0x331f3300},
1905 {0x0000a168, 0x4402331e},
1906 {0x0000a16c, 0x44004401},
1907 {0x0000a170, 0x441e441f},
1908 {0x0000a174, 0x55015502},
1909 {0x0000a178, 0x551f5500},
1910 {0x0000a17c, 0x6602551e},
1911 {0x0000a180, 0x66006601},
1912 {0x0000a184, 0x661e661f},
1913 {0x0000a188, 0x7703661d},
1914 {0x0000a18c, 0x77017702},
1915 {0x0000a190, 0x00007700},
1916 {0x0000a194, 0x00000000},
1917 {0x0000a198, 0x00000000},
1918 {0x0000a19c, 0x00000000},
1919 {0x0000a1a0, 0x00000000},
1920 {0x0000a1a4, 0x00000000},
1921 {0x0000a1a8, 0x00000000},
1922 {0x0000a1ac, 0x00000000},
1923 {0x0000a1b0, 0x00000000},
1924 {0x0000a1b4, 0x00000000},
1925 {0x0000a1b8, 0x00000000},
1926 {0x0000a1bc, 0x00000000},
1927 {0x0000a1c0, 0x00000000},
1928 {0x0000a1c4, 0x00000000},
1929 {0x0000a1c8, 0x00000000},
1930 {0x0000a1cc, 0x00000000},
1931 {0x0000a1d0, 0x00000000},
1932 {0x0000a1d4, 0x00000000},
1933 {0x0000a1d8, 0x00000000},
1934 {0x0000a1dc, 0x00000000},
1935 {0x0000a1e0, 0x00000000},
1936 {0x0000a1e4, 0x00000000},
1937 {0x0000a1e8, 0x00000000},
1938 {0x0000a1ec, 0x00000000},
1939 {0x0000a1f0, 0x00000396},
1940 {0x0000a1f4, 0x00000396},
1941 {0x0000a1f8, 0x00000396},
1942 {0x0000a1fc, 0x00000296},
1943};
1944
1945static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
1946 /* Addr allmodes */
1947 {0x00018c00, 0x10053e5e},
1948 {0x00018c04, 0x000801d8},
1949 {0x00018c08, 0x0000080c},
1950};
1951
1952static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
1953 /* Addr allmodes */
1954 {0x0000a000, 0x00060005},
1955 {0x0000a004, 0x00810080},
1956 {0x0000a008, 0x00830082},
1957 {0x0000a00c, 0x00850084},
1958 {0x0000a010, 0x01820181},
1959 {0x0000a014, 0x01840183},
1960 {0x0000a018, 0x01880185},
1961 {0x0000a01c, 0x018a0189},
1962 {0x0000a020, 0x02850284},
1963 {0x0000a024, 0x02890288},
1964 {0x0000a028, 0x028b028a},
1965 {0x0000a02c, 0x03850384},
1966 {0x0000a030, 0x03890388},
1967 {0x0000a034, 0x038b038a},
1968 {0x0000a038, 0x038d038c},
1969 {0x0000a03c, 0x03910390},
1970 {0x0000a040, 0x03930392},
1971 {0x0000a044, 0x03950394},
1972 {0x0000a048, 0x00000396},
1973 {0x0000a04c, 0x00000000},
1974 {0x0000a050, 0x00000000},
1975 {0x0000a054, 0x00000000},
1976 {0x0000a058, 0x00000000},
1977 {0x0000a05c, 0x00000000},
1978 {0x0000a060, 0x00000000},
1979 {0x0000a064, 0x00000000},
1980 {0x0000a068, 0x00000000},
1981 {0x0000a06c, 0x00000000},
1982 {0x0000a070, 0x00000000},
1983 {0x0000a074, 0x00000000},
1984 {0x0000a078, 0x00000000},
1985 {0x0000a07c, 0x00000000},
1986 {0x0000a080, 0x28282828},
1987 {0x0000a084, 0x28282828},
1988 {0x0000a088, 0x28282828},
1989 {0x0000a08c, 0x28282828},
1990 {0x0000a090, 0x28282828},
1991 {0x0000a094, 0x24242428},
1992 {0x0000a098, 0x171e1e1e},
1993 {0x0000a09c, 0x02020b0b},
1994 {0x0000a0a0, 0x02020202},
1995 {0x0000a0a4, 0x00000000},
1996 {0x0000a0a8, 0x00000000},
1997 {0x0000a0ac, 0x00000000},
1998 {0x0000a0b0, 0x00000000},
1999 {0x0000a0b4, 0x00000000},
2000 {0x0000a0b8, 0x00000000},
2001 {0x0000a0bc, 0x00000000},
2002 {0x0000a0c0, 0x22072208},
2003 {0x0000a0c4, 0x22052206},
2004 {0x0000a0c8, 0x22032204},
2005 {0x0000a0cc, 0x22012202},
2006 {0x0000a0d0, 0x221f2200},
2007 {0x0000a0d4, 0x221d221e},
2008 {0x0000a0d8, 0x33023303},
2009 {0x0000a0dc, 0x33003301},
2010 {0x0000a0e0, 0x331e331f},
2011 {0x0000a0e4, 0x4402331d},
2012 {0x0000a0e8, 0x44004401},
2013 {0x0000a0ec, 0x441e441f},
2014 {0x0000a0f0, 0x55025503},
2015 {0x0000a0f4, 0x55005501},
2016 {0x0000a0f8, 0x551e551f},
2017 {0x0000a0fc, 0x6602551d},
2018 {0x0000a100, 0x66006601},
2019 {0x0000a104, 0x661e661f},
2020 {0x0000a108, 0x7703661d},
2021 {0x0000a10c, 0x77017702},
2022 {0x0000a110, 0x00007700},
2023 {0x0000a114, 0x00000000},
2024 {0x0000a118, 0x00000000},
2025 {0x0000a11c, 0x00000000},
2026 {0x0000a120, 0x00000000},
2027 {0x0000a124, 0x00000000},
2028 {0x0000a128, 0x00000000},
2029 {0x0000a12c, 0x00000000},
2030 {0x0000a130, 0x00000000},
2031 {0x0000a134, 0x00000000},
2032 {0x0000a138, 0x00000000},
2033 {0x0000a13c, 0x00000000},
2034 {0x0000a140, 0x001f0000},
2035 {0x0000a144, 0x111f1100},
2036 {0x0000a148, 0x111d111e},
2037 {0x0000a14c, 0x111b111c},
2038 {0x0000a150, 0x22032204},
2039 {0x0000a154, 0x22012202},
2040 {0x0000a158, 0x221f2200},
2041 {0x0000a15c, 0x221d221e},
2042 {0x0000a160, 0x33013302},
2043 {0x0000a164, 0x331f3300},
2044 {0x0000a168, 0x4402331e},
2045 {0x0000a16c, 0x44004401},
2046 {0x0000a170, 0x441e441f},
2047 {0x0000a174, 0x55015502},
2048 {0x0000a178, 0x551f5500},
2049 {0x0000a17c, 0x6602551e},
2050 {0x0000a180, 0x66006601},
2051 {0x0000a184, 0x661e661f},
2052 {0x0000a188, 0x7703661d},
2053 {0x0000a18c, 0x77017702},
2054 {0x0000a190, 0x00007700},
2055 {0x0000a194, 0x00000000},
2056 {0x0000a198, 0x00000000},
2057 {0x0000a19c, 0x00000000},
2058 {0x0000a1a0, 0x00000000},
2059 {0x0000a1a4, 0x00000000},
2060 {0x0000a1a8, 0x00000000},
2061 {0x0000a1ac, 0x00000000},
2062 {0x0000a1b0, 0x00000000},
2063 {0x0000a1b4, 0x00000000},
2064 {0x0000a1b8, 0x00000000},
2065 {0x0000a1bc, 0x00000000},
2066 {0x0000a1c0, 0x00000000},
2067 {0x0000a1c4, 0x00000000},
2068 {0x0000a1c8, 0x00000000},
2069 {0x0000a1cc, 0x00000000},
2070 {0x0000a1d0, 0x00000000},
2071 {0x0000a1d4, 0x00000000},
2072 {0x0000a1d8, 0x00000000},
2073 {0x0000a1dc, 0x00000000},
2074 {0x0000a1e0, 0x00000000},
2075 {0x0000a1e4, 0x00000000},
2076 {0x0000a1e8, 0x00000000},
2077 {0x0000a1ec, 0x00000000},
2078 {0x0000a1f0, 0x00000396},
2079 {0x0000a1f4, 0x00000396},
2080 {0x0000a1f8, 0x00000396},
2081 {0x0000a1fc, 0x00000296},
2082};
2083
943#endif 2084#endif
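
The tables above are register initialization arrays for the AR9485: each row pairs a register address with one value per column (either a single "allmodes" column or one column per channel mode). As a rough illustration of how such a table is consumed, here is a minimal, self-contained sketch that walks a two-column {address, value} array and writes each entry through a stubbed register accessor; the stub stands in for the driver's real register write, which is not shown in this diff.

/*
 * Illustrative only: walking a two-column {address, value} initvals
 * table. reg_write() is a stand-in for the driver's register accessor.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t example_ini_allmodes[][2] = {
	/* Addr        allmodes   */
	{ 0x00018c00, 0x10012e5e },
	{ 0x00018c04, 0x000801d8 },
	{ 0x00018c08, 0x0000080c },
};

static void reg_write(uint32_t addr, uint32_t val)
{
	/* Pretend this is a memory-mapped register write. */
	printf("REG 0x%08" PRIx32 " <- 0x%08" PRIx32 "\n", addr, val);
}

int main(void)
{
	size_t i, n = sizeof(example_ini_allmodes) /
		      sizeof(example_ini_allmodes[0]);

	for (i = 0; i < n; i++)
		reg_write(example_ini_allmodes[i][0],
			  example_ini_allmodes[i][1]);
	return 0;
}
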
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1a7fa6ea4cf..f9f0389b92a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -92,9 +92,9 @@ struct ath_config {
92 * @BUF_XRETRY: To denote excessive retries of the buffer 92 * @BUF_XRETRY: To denote excessive retries of the buffer
93 */ 93 */
94enum buffer_type { 94enum buffer_type {
95 BUF_AMPDU = BIT(2), 95 BUF_AMPDU = BIT(0),
96 BUF_AGGR = BIT(3), 96 BUF_AGGR = BIT(1),
97 BUF_XRETRY = BIT(5), 97 BUF_XRETRY = BIT(2),
98}; 98};
99 99
100#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) 100#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
@@ -134,7 +134,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
134 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ 134 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
135 WME_AC_VO) 135 WME_AC_VO)
136 136
137#define ADDBA_EXCHANGE_ATTEMPTS 10
138#define ATH_AGGR_DELIM_SZ 4 137#define ATH_AGGR_DELIM_SZ 4
139#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ 138#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
140/* number of delimiters for encryption padding */ 139/* number of delimiters for encryption padding */
@@ -181,7 +180,8 @@ enum ATH_AGGR_STATUS {
181 180
182#define ATH_TXFIFO_DEPTH 8 181#define ATH_TXFIFO_DEPTH 8
183struct ath_txq { 182struct ath_txq {
184 u32 axq_qnum; 183 int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
184 u32 axq_qnum; /* ath9k hardware queue number */
185 u32 *axq_link; 185 u32 *axq_link;
186 struct list_head axq_q; 186 struct list_head axq_q;
187 spinlock_t axq_lock; 187 spinlock_t axq_lock;
@@ -189,6 +189,7 @@ struct ath_txq {
189 u32 axq_ampdu_depth; 189 u32 axq_ampdu_depth;
190 bool stopped; 190 bool stopped;
191 bool axq_tx_inprogress; 191 bool axq_tx_inprogress;
192 bool txq_flush_inprogress;
192 struct list_head axq_acq; 193 struct list_head axq_acq;
193 struct list_head txq_fifo[ATH_TXFIFO_DEPTH]; 194 struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
194 struct list_head txq_fifo_pending; 195 struct list_head txq_fifo_pending;
@@ -231,7 +232,6 @@ struct ath_buf {
231 bool bf_stale; 232 bool bf_stale;
232 u16 bf_flags; 233 u16 bf_flags;
233 struct ath_buf_state bf_state; 234 struct ath_buf_state bf_state;
234 struct ath_wiphy *aphy;
235}; 235};
236 236
237struct ath_atx_tid { 237struct ath_atx_tid {
@@ -252,7 +252,10 @@ struct ath_atx_tid {
252}; 252};
253 253
254struct ath_node { 254struct ath_node {
255 struct ath_common *common; 255#ifdef CONFIG_ATH9K_DEBUGFS
256 struct list_head list; /* for sc->nodes */
257 struct ieee80211_sta *sta; /* station struct we're part of */
258#endif
256 struct ath_atx_tid tid[WME_NUM_TID]; 259 struct ath_atx_tid tid[WME_NUM_TID];
257 struct ath_atx_ac ac[WME_NUM_AC]; 260 struct ath_atx_ac ac[WME_NUM_AC];
258 u16 maxampdu; 261 u16 maxampdu;
@@ -275,6 +278,11 @@ struct ath_tx_control {
275#define ATH_TX_XRETRY 0x02 278#define ATH_TX_XRETRY 0x02
276#define ATH_TX_BAR 0x04 279#define ATH_TX_BAR 0x04
277 280
281/**
282 * @txq_map: Index is mac80211 queue number. This is
283 * not necessarily the same as the hardware queue number
284 * (axq_qnum).
285 */
278struct ath_tx { 286struct ath_tx {
279 u16 seq_no; 287 u16 seq_no;
280 u32 txqsetup; 288 u32 txqsetup;
@@ -301,6 +309,8 @@ struct ath_rx {
301 struct ath_descdma rxdma; 309 struct ath_descdma rxdma;
302 struct ath_buf *rx_bufptr; 310 struct ath_buf *rx_bufptr;
303 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; 311 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
312
313 struct sk_buff *frag;
304}; 314};
305 315
306int ath_startrecv(struct ath_softc *sc); 316int ath_startrecv(struct ath_softc *sc);
@@ -337,10 +347,10 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
337 347
338struct ath_vif { 348struct ath_vif {
339 int av_bslot; 349 int av_bslot;
350 bool is_bslot_active;
340 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ 351 __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
341 enum nl80211_iftype av_opmode; 352 enum nl80211_iftype av_opmode;
342 struct ath_buf *av_bcbuf; 353 struct ath_buf *av_bcbuf;
343 struct ath_tx_control av_btxctl;
344 u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */ 354 u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
345}; 355};
346 356
@@ -360,7 +370,7 @@ struct ath_vif {
360#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 370#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
361 371
362struct ath_beacon_config { 372struct ath_beacon_config {
363 u16 beacon_interval; 373 int beacon_interval;
364 u16 listen_interval; 374 u16 listen_interval;
365 u16 dtim_period; 375 u16 dtim_period;
366 u16 bmiss_timeout; 376 u16 bmiss_timeout;
@@ -379,7 +389,6 @@ struct ath_beacon {
379 u32 ast_be_xmit; 389 u32 ast_be_xmit;
380 u64 bc_tstamp; 390 u64 bc_tstamp;
381 struct ieee80211_vif *bslot[ATH_BCBUF]; 391 struct ieee80211_vif *bslot[ATH_BCBUF];
382 struct ath_wiphy *bslot_aphy[ATH_BCBUF];
383 int slottime; 392 int slottime;
384 int slotupdate; 393 int slotupdate;
385 struct ath9k_tx_queue_info beacon_qi; 394 struct ath9k_tx_queue_info beacon_qi;
@@ -390,9 +399,10 @@ struct ath_beacon {
390 399
391void ath_beacon_tasklet(unsigned long data); 400void ath_beacon_tasklet(unsigned long data);
392void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif); 401void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
393int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif); 402int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
394void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp); 403void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
395int ath_beaconq_config(struct ath_softc *sc); 404int ath_beaconq_config(struct ath_softc *sc);
405void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
396 406
397/*******/ 407/*******/
398/* ANI */ 408/* ANI */
@@ -527,7 +537,6 @@ struct ath_ant_comb {
527#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 537#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
528#define ATH_MAX_SW_RETRIES 10 538#define ATH_MAX_SW_RETRIES 10
529#define ATH_CHAN_MAX 255 539#define ATH_CHAN_MAX 255
530#define IEEE80211_WEP_NKID 4 /* number of key ids */
531 540
532#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 541#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
533#define ATH_RATE_DUMMY_MARKER 0 542#define ATH_RATE_DUMMY_MARKER 0
@@ -555,27 +564,28 @@ struct ath_ant_comb {
555#define PS_WAIT_FOR_TX_ACK BIT(3) 564#define PS_WAIT_FOR_TX_ACK BIT(3)
556#define PS_BEACON_SYNC BIT(4) 565#define PS_BEACON_SYNC BIT(4)
557 566
558struct ath_wiphy;
559struct ath_rate_table; 567struct ath_rate_table;
560 568
569struct ath9k_vif_iter_data {
570 const u8 *hw_macaddr; /* phy's hardware address, set
571 * before starting iteration for
572 * valid bssid mask.
573 */
574 u8 mask[ETH_ALEN]; /* bssid mask */
575 int naps; /* number of AP vifs */
576 int nmeshes; /* number of mesh vifs */
577 int nstations; /* number of station vifs */
578 int nwds; /* number of nwd vifs */
579 int nadhocs; /* number of adhoc vifs */
580 int nothers; /* number of vifs not specified above. */
581};
582
561struct ath_softc { 583struct ath_softc {
562 struct ieee80211_hw *hw; 584 struct ieee80211_hw *hw;
563 struct device *dev; 585 struct device *dev;
564 586
565 spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
566 struct ath_wiphy *pri_wiphy;
567 struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
568 * have NULL entries */
569 int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
570 int chan_idx; 587 int chan_idx;
571 int chan_is_ht; 588 int chan_is_ht;
572 struct ath_wiphy *next_wiphy;
573 struct work_struct chan_work;
574 int wiphy_select_failures;
575 unsigned long wiphy_select_first_fail;
576 struct delayed_work wiphy_work;
577 unsigned long wiphy_scheduler_int;
578 int wiphy_scheduler_index;
579 struct survey_info *cur_survey; 589 struct survey_info *cur_survey;
580 struct survey_info survey[ATH9K_NUM_CHANNELS]; 590 struct survey_info survey[ATH9K_NUM_CHANNELS];
581 591
@@ -592,14 +602,16 @@ struct ath_softc {
592 struct work_struct hw_check_work; 602 struct work_struct hw_check_work;
593 struct completion paprd_complete; 603 struct completion paprd_complete;
594 604
605 unsigned int hw_busy_count;
606
595 u32 intrstatus; 607 u32 intrstatus;
596 u32 sc_flags; /* SC_OP_* */ 608 u32 sc_flags; /* SC_OP_* */
597 u16 ps_flags; /* PS_* */ 609 u16 ps_flags; /* PS_* */
598 u16 curtxpow; 610 u16 curtxpow;
599 u8 nbcnvifs;
600 u16 nvifs;
601 bool ps_enabled; 611 bool ps_enabled;
602 bool ps_idle; 612 bool ps_idle;
613 short nbcnvifs;
614 short nvifs;
603 unsigned long ps_usecount; 615 unsigned long ps_usecount;
604 616
605 struct ath_config config; 617 struct ath_config config;
@@ -618,13 +630,18 @@ struct ath_softc {
618 int led_on_cnt; 630 int led_on_cnt;
619 int led_off_cnt; 631 int led_off_cnt;
620 632
621 int beacon_interval; 633 struct ath9k_hw_cal_data caldata;
634 int last_rssi;
622 635
623#ifdef CONFIG_ATH9K_DEBUGFS 636#ifdef CONFIG_ATH9K_DEBUGFS
624 struct ath9k_debug debug; 637 struct ath9k_debug debug;
638 spinlock_t nodes_lock;
639 struct list_head nodes; /* basically, stations */
640 unsigned int tx_complete_poll_work_seen;
625#endif 641#endif
626 struct ath_beacon_config cur_beacon_conf; 642 struct ath_beacon_config cur_beacon_conf;
627 struct delayed_work tx_complete_work; 643 struct delayed_work tx_complete_work;
644 struct delayed_work hw_pll_work;
628 struct ath_btcoex btcoex; 645 struct ath_btcoex btcoex;
629 646
630 struct ath_descdma txsdma; 647 struct ath_descdma txsdma;
@@ -632,23 +649,6 @@ struct ath_softc {
632 struct ath_ant_comb ant_comb; 649 struct ath_ant_comb ant_comb;
633}; 650};
634 651
635struct ath_wiphy {
636 struct ath_softc *sc; /* shared for all virtual wiphys */
637 struct ieee80211_hw *hw;
638 struct ath9k_hw_cal_data caldata;
639 enum ath_wiphy_state {
640 ATH_WIPHY_INACTIVE,
641 ATH_WIPHY_ACTIVE,
642 ATH_WIPHY_PAUSING,
643 ATH_WIPHY_PAUSED,
644 ATH_WIPHY_SCAN,
645 } state;
646 bool idle;
647 int chan_idx;
648 int chan_is_ht;
649 int last_rssi;
650};
651
652void ath9k_tasklet(unsigned long data); 652void ath9k_tasklet(unsigned long data);
653int ath_reset(struct ath_softc *sc, bool retry_tx); 653int ath_reset(struct ath_softc *sc, bool retry_tx);
654int ath_cabq_update(struct ath_softc *); 654int ath_cabq_update(struct ath_softc *);
@@ -669,14 +669,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
669 const struct ath_bus_ops *bus_ops); 669 const struct ath_bus_ops *bus_ops);
670void ath9k_deinit_device(struct ath_softc *sc); 670void ath9k_deinit_device(struct ath_softc *sc);
671void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 671void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
672void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
673 struct ath9k_channel *ichan);
674int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 672int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
675 struct ath9k_channel *hchan); 673 struct ath9k_channel *hchan);
676 674
677void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); 675void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
678void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); 676void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
679bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode); 677bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
678bool ath9k_uses_beacons(int type);
680 679
681#ifdef CONFIG_PCI 680#ifdef CONFIG_PCI
682int ath_pci_init(void); 681int ath_pci_init(void);
@@ -700,26 +699,12 @@ void ath9k_ps_restore(struct ath_softc *sc);
700u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate); 699u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
701 700
702void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 701void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
703int ath9k_wiphy_add(struct ath_softc *sc);
704int ath9k_wiphy_del(struct ath_wiphy *aphy);
705void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
706int ath9k_wiphy_pause(struct ath_wiphy *aphy);
707int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
708int ath9k_wiphy_select(struct ath_wiphy *aphy);
709void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int);
710void ath9k_wiphy_chan_work(struct work_struct *work);
711bool ath9k_wiphy_started(struct ath_softc *sc);
712void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
713 struct ath_wiphy *selected);
714bool ath9k_wiphy_scanning(struct ath_softc *sc);
715void ath9k_wiphy_work(struct work_struct *work);
716bool ath9k_all_wiphys_idle(struct ath_softc *sc);
717void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
718
719void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
720bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
721 702
722void ath_start_rfkill_poll(struct ath_softc *sc); 703void ath_start_rfkill_poll(struct ath_softc *sc);
723extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); 704extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
705void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
706 struct ieee80211_vif *vif,
707 struct ath9k_vif_iter_data *iter_data);
708
724 709
725#endif /* ATH9K_H */ 710#endif /* ATH9K_H */
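
The new ath9k_vif_iter_data structure above gathers per-type interface counts and a BSSID mask while iterating over active vifs; the iterator itself lives outside this excerpt. As a hedged sketch of the usual mask-accumulation idea (assumed here, not copied from the driver), a bit stays set in the mask only while every interface address agrees with the hardware MAC in that bit position:

/*
 * Sketch of BSSID-mask accumulation; simplified, user-space C.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void mask_accumulate(uint8_t mask[ETH_ALEN],
			    const uint8_t *hw_macaddr,
			    const uint8_t *vif_addr)
{
	int i;

	/* Clear every bit in which this vif address differs from the
	 * hardware MAC; bits that always match stay set. */
	for (i = 0; i < ETH_ALEN; i++)
		mask[i] &= ~(hw_macaddr[i] ^ vif_addr[i]);
}

int main(void)
{
	uint8_t mask[ETH_ALEN];
	const uint8_t hw[ETH_ALEN]  = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x30 };
	const uint8_t vif[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x10, 0x20, 0x31 };

	memset(mask, 0xff, sizeof(mask));	/* start fully strict */
	mask_accumulate(mask, hw, vif);

	printf("mask: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mask[0], mask[1], mask[2], mask[3], mask[4], mask[5]);
	return 0;
}
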
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 385ba03134b..a4bdfdb043e 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -112,8 +112,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
112 112
113static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) 113static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
114{ 114{
115 struct ath_wiphy *aphy = hw->priv; 115 struct ath_softc *sc = hw->priv;
116 struct ath_softc *sc = aphy->sc;
117 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 116 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
118 struct ath_tx_control txctl; 117 struct ath_tx_control txctl;
119 118
@@ -132,8 +131,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
132static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, 131static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
133 struct ieee80211_vif *vif) 132 struct ieee80211_vif *vif)
134{ 133{
135 struct ath_wiphy *aphy = hw->priv; 134 struct ath_softc *sc = hw->priv;
136 struct ath_softc *sc = aphy->sc;
137 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 135 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
138 struct ath_buf *bf; 136 struct ath_buf *bf;
139 struct ath_vif *avp; 137 struct ath_vif *avp;
@@ -142,13 +140,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
142 struct ieee80211_tx_info *info; 140 struct ieee80211_tx_info *info;
143 int cabq_depth; 141 int cabq_depth;
144 142
145 if (aphy->state != ATH_WIPHY_ACTIVE)
146 return NULL;
147
148 avp = (void *)vif->drv_priv; 143 avp = (void *)vif->drv_priv;
149 cabq = sc->beacon.cabq; 144 cabq = sc->beacon.cabq;
150 145
151 if (avp->av_bcbuf == NULL) 146 if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
152 return NULL; 147 return NULL;
153 148
154 /* Release the old beacon first */ 149 /* Release the old beacon first */
@@ -225,13 +220,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
225 return bf; 220 return bf;
226} 221}
227 222
228int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 223int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
229{ 224{
230 struct ath_softc *sc = aphy->sc;
231 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 225 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
232 struct ath_vif *avp; 226 struct ath_vif *avp;
233 struct ath_buf *bf; 227 struct ath_buf *bf;
234 struct sk_buff *skb; 228 struct sk_buff *skb;
229 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
235 __le64 tstamp; 230 __le64 tstamp;
236 231
237 avp = (void *)vif->drv_priv; 232 avp = (void *)vif->drv_priv;
@@ -244,9 +239,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
244 struct ath_buf, list); 239 struct ath_buf, list);
245 list_del(&avp->av_bcbuf->list); 240 list_del(&avp->av_bcbuf->list);
246 241
247 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || 242 if (ath9k_uses_beacons(vif->type)) {
248 sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
249 sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
250 int slot; 243 int slot;
251 /* 244 /*
252 * Assign the vif to a beacon xmit slot. As 245 * Assign the vif to a beacon xmit slot. As
@@ -256,6 +249,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
256 for (slot = 0; slot < ATH_BCBUF; slot++) 249 for (slot = 0; slot < ATH_BCBUF; slot++)
257 if (sc->beacon.bslot[slot] == NULL) { 250 if (sc->beacon.bslot[slot] == NULL) {
258 avp->av_bslot = slot; 251 avp->av_bslot = slot;
252 avp->is_bslot_active = false;
259 253
260 /* NB: keep looking for a double slot */ 254 /* NB: keep looking for a double slot */
261 if (slot == 0 || !sc->beacon.bslot[slot-1]) 255 if (slot == 0 || !sc->beacon.bslot[slot-1])
@@ -263,7 +257,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
263 } 257 }
264 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); 258 BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
265 sc->beacon.bslot[avp->av_bslot] = vif; 259 sc->beacon.bslot[avp->av_bslot] = vif;
266 sc->beacon.bslot_aphy[avp->av_bslot] = aphy;
267 sc->nbcnvifs++; 260 sc->nbcnvifs++;
268 } 261 }
269 } 262 }
@@ -281,10 +274,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
281 274
282 /* NB: the beacon data buffer must be 32-bit aligned. */ 275 /* NB: the beacon data buffer must be 32-bit aligned. */
283 skb = ieee80211_beacon_get(sc->hw, vif); 276 skb = ieee80211_beacon_get(sc->hw, vif);
284 if (skb == NULL) { 277 if (skb == NULL)
285 ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n");
286 return -ENOMEM; 278 return -ENOMEM;
287 }
288 279
289 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 280 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
290 sc->beacon.bc_tstamp = le64_to_cpu(tstamp); 281 sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
@@ -293,7 +284,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
293 u64 tsfadjust; 284 u64 tsfadjust;
294 int intval; 285 int intval;
295 286
296 intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; 287 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
297 288
298 /* 289 /*
299 * Calculate the TSF offset for this beacon slot, i.e., the 290 * Calculate the TSF offset for this beacon slot, i.e., the
@@ -325,6 +316,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
325 ath_err(common, "dma_mapping_error on beacon alloc\n"); 316 ath_err(common, "dma_mapping_error on beacon alloc\n");
326 return -ENOMEM; 317 return -ENOMEM;
327 } 318 }
319 avp->is_bslot_active = true;
328 320
329 return 0; 321 return 0;
330} 322}
@@ -336,7 +328,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
336 328
337 if (avp->av_bslot != -1) { 329 if (avp->av_bslot != -1) {
338 sc->beacon.bslot[avp->av_bslot] = NULL; 330 sc->beacon.bslot[avp->av_bslot] = NULL;
339 sc->beacon.bslot_aphy[avp->av_bslot] = NULL;
340 sc->nbcnvifs--; 331 sc->nbcnvifs--;
341 } 332 }
342 333
@@ -358,11 +349,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
358void ath_beacon_tasklet(unsigned long data) 349void ath_beacon_tasklet(unsigned long data)
359{ 350{
360 struct ath_softc *sc = (struct ath_softc *)data; 351 struct ath_softc *sc = (struct ath_softc *)data;
352 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
361 struct ath_hw *ah = sc->sc_ah; 353 struct ath_hw *ah = sc->sc_ah;
362 struct ath_common *common = ath9k_hw_common(ah); 354 struct ath_common *common = ath9k_hw_common(ah);
363 struct ath_buf *bf = NULL; 355 struct ath_buf *bf = NULL;
364 struct ieee80211_vif *vif; 356 struct ieee80211_vif *vif;
365 struct ath_wiphy *aphy;
366 int slot; 357 int slot;
367 u32 bfaddr, bc = 0, tsftu; 358 u32 bfaddr, bc = 0, tsftu;
368 u64 tsf; 359 u64 tsf;
@@ -406,7 +397,7 @@ void ath_beacon_tasklet(unsigned long data)
406 * on the tsf to safeguard against missing an swba. 397 * on the tsf to safeguard against missing an swba.
407 */ 398 */
408 399
409 intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; 400 intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
410 401
411 tsf = ath9k_hw_gettsf64(ah); 402 tsf = ath9k_hw_gettsf64(ah);
412 tsftu = TSF_TO_TU(tsf>>32, tsf); 403 tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -420,7 +411,6 @@ void ath_beacon_tasklet(unsigned long data)
420 */ 411 */
421 slot = ATH_BCBUF - slot - 1; 412 slot = ATH_BCBUF - slot - 1;
422 vif = sc->beacon.bslot[slot]; 413 vif = sc->beacon.bslot[slot];
423 aphy = sc->beacon.bslot_aphy[slot];
424 414
425 ath_dbg(common, ATH_DBG_BEACON, 415 ath_dbg(common, ATH_DBG_BEACON,
426 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 416 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -428,7 +418,7 @@ void ath_beacon_tasklet(unsigned long data)
428 418
429 bfaddr = 0; 419 bfaddr = 0;
430 if (vif) { 420 if (vif) {
431 bf = ath_beacon_generate(aphy->hw, vif); 421 bf = ath_beacon_generate(sc->hw, vif);
432 if (bf != NULL) { 422 if (bf != NULL) {
433 bfaddr = bf->bf_daddr; 423 bfaddr = bf->bf_daddr;
434 bc = 1; 424 bc = 1;
@@ -720,10 +710,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
720 iftype = sc->sc_ah->opmode; 710 iftype = sc->sc_ah->opmode;
721 } 711 }
722 712
723 cur_conf->listen_interval = 1; 713 cur_conf->listen_interval = 1;
724 cur_conf->dtim_count = 1; 714 cur_conf->dtim_count = 1;
725 cur_conf->bmiss_timeout = 715 cur_conf->bmiss_timeout =
726 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; 716 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
727 717
728 /* 718 /*
729 * It looks like mac80211 may end up using beacon interval of zero in 719 * It looks like mac80211 may end up using beacon interval of zero in
@@ -735,8 +725,9 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
735 cur_conf->beacon_interval = 100; 725 cur_conf->beacon_interval = 100;
736 726
737 /* 727 /*
738 * Some times we dont parse dtim period from mac80211, in that case 728 * We don't parse dtim period from mac80211 during the driver
739 * use a default value 729 * initialization as it breaks association with hidden-ssid
730 * AP and it causes latency in roaming
740 */ 731 */
741 if (cur_conf->dtim_period == 0) 732 if (cur_conf->dtim_period == 0)
742 cur_conf->dtim_period = 1; 733 cur_conf->dtim_period = 1;
@@ -760,3 +751,36 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
760 751
761 sc->sc_flags |= SC_OP_BEACONS; 752 sc->sc_flags |= SC_OP_BEACONS;
762} 753}
754
755void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
756{
757 struct ath_hw *ah = sc->sc_ah;
758 struct ath_vif *avp;
759 int slot;
760 bool found = false;
761
762 ath9k_ps_wakeup(sc);
763 if (status) {
764 for (slot = 0; slot < ATH_BCBUF; slot++) {
765 if (sc->beacon.bslot[slot]) {
766 avp = (void *)sc->beacon.bslot[slot]->drv_priv;
767 if (avp->is_bslot_active) {
768 found = true;
769 break;
770 }
771 }
772 }
773 if (found) {
774 /* Re-enable beaconing */
775 ah->imask |= ATH9K_INT_SWBA;
776 ath9k_hw_set_interrupts(ah, ah->imask);
777 }
778 } else {
779 /* Disable SWBA interrupt */
780 ah->imask &= ~ATH9K_INT_SWBA;
781 ath9k_hw_set_interrupts(ah, ah->imask);
782 tasklet_kill(&sc->bcon_tasklet);
783 ath9k_hw_stoptxdma(ah, sc->beacon.beaconq);
784 }
785 ath9k_ps_restore(sc);
786}
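
ath9k_set_beaconing_status above re-enables the SWBA interrupt only when at least one beacon slot still carries an active vif; otherwise it masks SWBA, kills the beacon tasklet and stops the beacon queue. The decision reduces to a simple slot scan, sketched below with simplified stand-in structures (not the driver's types):

/*
 * Illustrative slot scan: decide whether beaconing should stay enabled.
 */
#include <stdbool.h>
#include <stdio.h>

#define ATH_BCBUF 4

struct fake_vif {
	bool is_bslot_active;
};

static bool any_active_bslot(struct fake_vif *bslot[ATH_BCBUF])
{
	int slot;

	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (bslot[slot] && bslot[slot]->is_bslot_active)
			return true;
	return false;
}

int main(void)
{
	struct fake_vif a = { .is_bslot_active = false };
	struct fake_vif b = { .is_bslot_active = true };
	struct fake_vif *slots[ATH_BCBUF] = { &a, NULL, &b, NULL };

	printf("re-enable SWBA: %s\n",
	       any_active_bslot(slots) ? "yes" : "no");
	return 0;
}
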
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index b68a1acbddd..b4a92a4313f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -382,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
382 s16 default_nf; 382 s16 default_nf;
383 int i, j; 383 int i, j;
384 384
385 if (!ah->caldata) 385 ah->caldata->channel = chan->channel;
386 return; 386 ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
387
388 h = ah->caldata->nfCalHist; 387 h = ah->caldata->nfCalHist;
389 default_nf = ath9k_hw_get_default_nf(ah, chan); 388 default_nf = ath9k_hw_get_default_nf(ah, chan);
390 for (i = 0; i < NUM_NF_READINGS; i++) { 389 for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index df1998d4825..615e68276e7 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -189,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
189} 189}
190EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp); 190EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
191 191
192void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
193 u16 new_txpow, u16 *txpower)
194{
195 if (cur_txpow != new_txpow) {
196 ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
197 /* read back in case value is clamped */
198 *txpower = ath9k_hw_regulatory(ah)->power_limit;
199 }
200}
201EXPORT_SYMBOL(ath9k_cmn_update_txpow);
202
192static int __init ath9k_cmn_init(void) 203static int __init ath9k_cmn_init(void)
193{ 204{
194 return 0; 205 return 0;
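
ath9k_cmn_update_txpow above applies a new transmit power limit only when it differs from the current one, then reads the limit back because the regulatory layer may clamp it. A minimal stand-alone sketch of that write-then-read-back pattern, with a stub in place of the real hardware/regulatory call:

/*
 * Sketch of "write only on change, then read back the clamped value".
 * hw_set_txpowerlimit() is a stub; the clamp value is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t hw_power_limit;

static void hw_set_txpowerlimit(uint16_t limit)
{
	/* Arbitrary clamp standing in for regulatory limits. */
	hw_power_limit = limit > 100 ? 100 : limit;
}

static void update_txpow(uint16_t cur, uint16_t new_limit, uint16_t *txpower)
{
	if (cur != new_limit) {
		hw_set_txpowerlimit(new_limit);
		/* read back in case the value was clamped */
		*txpower = hw_power_limit;
	}
}

int main(void)
{
	uint16_t txpower = 60;

	update_txpow(60, 120, &txpower);
	printf("effective txpower: %u (0.5 dBm units)\n", (unsigned)txpower);
	return 0;
}
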
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index a126bddebb0..b2f7b5f8909 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,8 +23,6 @@
23 23
24/* Common header for Atheros 802.11n base driver cores */ 24/* Common header for Atheros 802.11n base driver cores */
25 25
26#define IEEE80211_WEP_NKID 4
27
28#define WME_NUM_TID 16 26#define WME_NUM_TID 16
29#define WME_BA_BMP_SIZE 64 27#define WME_BA_BMP_SIZE 64
30#define WME_MAX_BA WME_BA_BMP_SIZE 28#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -70,3 +68,5 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
70int ath9k_cmn_count_streams(unsigned int chainmask, int max); 68int ath9k_cmn_count_streams(unsigned int chainmask, int max);
71void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, 69void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
72 enum ath_stomp_type stomp_type); 70 enum ath_stomp_type stomp_type);
71void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
72 u16 new_txpow, u16 *txpower);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3586c43077a..5cfcf8c235a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -381,41 +381,40 @@ static const struct file_operations fops_interrupt = {
381 .llseek = default_llseek, 381 .llseek = default_llseek,
382}; 382};
383 383
384static const char * ath_wiphy_state_str(enum ath_wiphy_state state) 384static const char *channel_type_str(enum nl80211_channel_type t)
385{ 385{
386 switch (state) { 386 switch (t) {
387 case ATH_WIPHY_INACTIVE: 387 case NL80211_CHAN_NO_HT:
388 return "INACTIVE"; 388 return "no ht";
389 case ATH_WIPHY_ACTIVE: 389 case NL80211_CHAN_HT20:
390 return "ACTIVE"; 390 return "ht20";
391 case ATH_WIPHY_PAUSING: 391 case NL80211_CHAN_HT40MINUS:
392 return "PAUSING"; 392 return "ht40-";
393 case ATH_WIPHY_PAUSED: 393 case NL80211_CHAN_HT40PLUS:
394 return "PAUSED"; 394 return "ht40+";
395 case ATH_WIPHY_SCAN: 395 default:
396 return "SCAN"; 396 return "???";
397 } 397 }
398 return "?";
399} 398}
400 399
401static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, 400static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
402 size_t count, loff_t *ppos) 401 size_t count, loff_t *ppos)
403{ 402{
404 struct ath_softc *sc = file->private_data; 403 struct ath_softc *sc = file->private_data;
405 struct ath_wiphy *aphy = sc->pri_wiphy; 404 struct ieee80211_channel *chan = sc->hw->conf.channel;
406 struct ieee80211_channel *chan = aphy->hw->conf.channel; 405 struct ieee80211_conf *conf = &(sc->hw->conf);
407 char buf[512]; 406 char buf[512];
408 unsigned int len = 0; 407 unsigned int len = 0;
409 int i;
410 u8 addr[ETH_ALEN]; 408 u8 addr[ETH_ALEN];
411 u32 tmp; 409 u32 tmp;
412 410
413 len += snprintf(buf + len, sizeof(buf) - len, 411 len += snprintf(buf + len, sizeof(buf) - len,
414 "primary: %s (%s chan=%d ht=%d)\n", 412 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
415 wiphy_name(sc->pri_wiphy->hw->wiphy), 413 wiphy_name(sc->hw->wiphy),
416 ath_wiphy_state_str(sc->pri_wiphy->state),
417 ieee80211_frequency_to_channel(chan->center_freq), 414 ieee80211_frequency_to_channel(chan->center_freq),
418 aphy->chan_is_ht); 415 chan->center_freq,
416 conf->channel_type,
417 channel_type_str(conf->channel_type));
419 418
420 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr); 419 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
421 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); 420 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -457,156 +456,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
457 else 456 else
458 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 457 len += snprintf(buf + len, sizeof(buf) - len, "\n");
459 458
460 /* Put variable-length stuff down here, and check for overflows. */
461 for (i = 0; i < sc->num_sec_wiphy; i++) {
462 struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
463 if (aphy_tmp == NULL)
464 continue;
465 chan = aphy_tmp->hw->conf.channel;
466 len += snprintf(buf + len, sizeof(buf) - len,
467 "secondary: %s (%s chan=%d ht=%d)\n",
468 wiphy_name(aphy_tmp->hw->wiphy),
469 ath_wiphy_state_str(aphy_tmp->state),
470 ieee80211_frequency_to_channel(chan->center_freq),
471 aphy_tmp->chan_is_ht);
472 }
473 if (len > sizeof(buf)) 459 if (len > sizeof(buf))
474 len = sizeof(buf); 460 len = sizeof(buf);
475 461
476 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 462 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
477} 463}
478 464
479static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name)
480{
481 int i;
482 if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0)
483 return sc->pri_wiphy;
484 for (i = 0; i < sc->num_sec_wiphy; i++) {
485 struct ath_wiphy *aphy = sc->sec_wiphy[i];
486 if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0)
487 return aphy;
488 }
489 return NULL;
490}
491
492static int del_wiphy(struct ath_softc *sc, const char *name)
493{
494 struct ath_wiphy *aphy = get_wiphy(sc, name);
495 if (!aphy)
496 return -ENOENT;
497 return ath9k_wiphy_del(aphy);
498}
499
500static int pause_wiphy(struct ath_softc *sc, const char *name)
501{
502 struct ath_wiphy *aphy = get_wiphy(sc, name);
503 if (!aphy)
504 return -ENOENT;
505 return ath9k_wiphy_pause(aphy);
506}
507
508static int unpause_wiphy(struct ath_softc *sc, const char *name)
509{
510 struct ath_wiphy *aphy = get_wiphy(sc, name);
511 if (!aphy)
512 return -ENOENT;
513 return ath9k_wiphy_unpause(aphy);
514}
515
516static int select_wiphy(struct ath_softc *sc, const char *name)
517{
518 struct ath_wiphy *aphy = get_wiphy(sc, name);
519 if (!aphy)
520 return -ENOENT;
521 return ath9k_wiphy_select(aphy);
522}
523
524static int schedule_wiphy(struct ath_softc *sc, const char *msec)
525{
526 ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0));
527 return 0;
528}
529
530static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf,
531 size_t count, loff_t *ppos)
532{
533 struct ath_softc *sc = file->private_data;
534 char buf[50];
535 size_t len;
536
537 len = min(count, sizeof(buf) - 1);
538 if (copy_from_user(buf, user_buf, len))
539 return -EFAULT;
540 buf[len] = '\0';
541 if (len > 0 && buf[len - 1] == '\n')
542 buf[len - 1] = '\0';
543
544 if (strncmp(buf, "add", 3) == 0) {
545 int res = ath9k_wiphy_add(sc);
546 if (res < 0)
547 return res;
548 } else if (strncmp(buf, "del=", 4) == 0) {
549 int res = del_wiphy(sc, buf + 4);
550 if (res < 0)
551 return res;
552 } else if (strncmp(buf, "pause=", 6) == 0) {
553 int res = pause_wiphy(sc, buf + 6);
554 if (res < 0)
555 return res;
556 } else if (strncmp(buf, "unpause=", 8) == 0) {
557 int res = unpause_wiphy(sc, buf + 8);
558 if (res < 0)
559 return res;
560 } else if (strncmp(buf, "select=", 7) == 0) {
561 int res = select_wiphy(sc, buf + 7);
562 if (res < 0)
563 return res;
564 } else if (strncmp(buf, "schedule=", 9) == 0) {
565 int res = schedule_wiphy(sc, buf + 9);
566 if (res < 0)
567 return res;
568 } else
569 return -EOPNOTSUPP;
570
571 return count;
572}
573
574static const struct file_operations fops_wiphy = { 465static const struct file_operations fops_wiphy = {
575 .read = read_file_wiphy, 466 .read = read_file_wiphy,
576 .write = write_file_wiphy,
577 .open = ath9k_debugfs_open, 467 .open = ath9k_debugfs_open,
578 .owner = THIS_MODULE, 468 .owner = THIS_MODULE,
579 .llseek = default_llseek, 469 .llseek = default_llseek,
580}; 470};
581 471
472#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
582#define PR(str, elem) \ 473#define PR(str, elem) \
583 do { \ 474 do { \
584 len += snprintf(buf + len, size - len, \ 475 len += snprintf(buf + len, size - len, \
585 "%s%13u%11u%10u%10u\n", str, \ 476 "%s%13u%11u%10u%10u\n", str, \
586 sc->debug.stats.txstats[WME_AC_BE].elem, \ 477 sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
587 sc->debug.stats.txstats[WME_AC_BK].elem, \ 478 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
588 sc->debug.stats.txstats[WME_AC_VI].elem, \ 479 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
589 sc->debug.stats.txstats[WME_AC_VO].elem); \ 480 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
481 if (len >= size) \
482 goto done; \
483} while(0)
484
485#define PRX(str, elem) \
486do { \
487 len += snprintf(buf + len, size - len, \
488 "%s%13u%11u%10u%10u\n", str, \
489 (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \
490 (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \
491 (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \
492 (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \
493 if (len >= size) \
494 goto done; \
590} while(0) 495} while(0)
591 496
497#define PRQLE(str, elem) \
498do { \
499 len += snprintf(buf + len, size - len, \
500 "%s%13i%11i%10i%10i\n", str, \
501 list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \
502 list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \
503 list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \
504 list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \
505 if (len >= size) \
506 goto done; \
507} while (0)
508
592static ssize_t read_file_xmit(struct file *file, char __user *user_buf, 509static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
593 size_t count, loff_t *ppos) 510 size_t count, loff_t *ppos)
594{ 511{
595 struct ath_softc *sc = file->private_data; 512 struct ath_softc *sc = file->private_data;
596 char *buf; 513 char *buf;
597 unsigned int len = 0, size = 2048; 514 unsigned int len = 0, size = 8000;
515 int i;
598 ssize_t retval = 0; 516 ssize_t retval = 0;
517 char tmp[32];
599 518
600 buf = kzalloc(size, GFP_KERNEL); 519 buf = kzalloc(size, GFP_KERNEL);
601 if (buf == NULL) 520 if (buf == NULL)
602 return -ENOMEM; 521 return -ENOMEM;
603 522
604 len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); 523 len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x"
524 " poll-work-seen: %u\n"
525 "%30s %10s%10s%10s\n\n",
526 ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
527 sc->tx_complete_poll_work_seen,
528 "BE", "BK", "VI", "VO");
605 529
606 PR("MPDUs Queued: ", queued); 530 PR("MPDUs Queued: ", queued);
607 PR("MPDUs Completed: ", completed); 531 PR("MPDUs Completed: ", completed);
608 PR("Aggregates: ", a_aggr); 532 PR("Aggregates: ", a_aggr);
609 PR("AMPDUs Queued: ", a_queued); 533 PR("AMPDUs Queued HW:", a_queued_hw);
534 PR("AMPDUs Queued SW:", a_queued_sw);
610 PR("AMPDUs Completed:", a_completed); 535 PR("AMPDUs Completed:", a_completed);
611 PR("AMPDUs Retried: ", a_retries); 536 PR("AMPDUs Retried: ", a_retries);
612 PR("AMPDUs XRetried: ", a_xretries); 537 PR("AMPDUs XRetried: ", a_xretries);
@@ -618,6 +543,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
618 PR("DELIM Underrun: ", delim_underrun); 543 PR("DELIM Underrun: ", delim_underrun);
619 PR("TX-Pkts-All: ", tx_pkts_all); 544 PR("TX-Pkts-All: ", tx_pkts_all);
620 PR("TX-Bytes-All: ", tx_bytes_all); 545 PR("TX-Bytes-All: ", tx_bytes_all);
546 PR("hw-put-tx-buf: ", puttxbuf);
547 PR("hw-tx-start: ", txstart);
548 PR("hw-tx-proc-desc: ", txprocdesc);
549 len += snprintf(buf + len, size - len,
550 "%s%11p%11p%10p%10p\n", "txq-memory-address:",
551 &(sc->tx.txq_map[WME_AC_BE]),
552 &(sc->tx.txq_map[WME_AC_BK]),
553 &(sc->tx.txq_map[WME_AC_VI]),
554 &(sc->tx.txq_map[WME_AC_VO]));
555 if (len >= size)
556 goto done;
557
558 PRX("axq-qnum: ", axq_qnum);
559 PRX("axq-depth: ", axq_depth);
560 PRX("axq-ampdu_depth: ", axq_ampdu_depth);
561 PRX("axq-stopped ", stopped);
562 PRX("tx-in-progress ", axq_tx_inprogress);
563 PRX("pending-frames ", pending_frames);
564 PRX("txq_headidx: ", txq_headidx);
565 PRX("txq_tailidx: ", txq_headidx);
566
567 PRQLE("axq_q empty: ", axq_q);
568 PRQLE("axq_acq empty: ", axq_acq);
569 PRQLE("txq_fifo_pending: ", txq_fifo_pending);
570 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
571 snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
572 PRQLE(tmp, txq_fifo[i]);
573 }
574
575 /* Print out more detailed queue-info */
576 for (i = 0; i <= WME_AC_BK; i++) {
577 struct ath_txq *txq = &(sc->tx.txq[i]);
578 struct ath_atx_ac *ac;
579 struct ath_atx_tid *tid;
580 if (len >= size)
581 goto done;
582 spin_lock_bh(&txq->axq_lock);
583 if (!list_empty(&txq->axq_acq)) {
584 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
585 list);
586 len += snprintf(buf + len, size - len,
587 "txq[%i] first-ac: %p sched: %i\n",
588 i, ac, ac->sched);
589 if (list_empty(&ac->tid_q) || (len >= size))
590 goto done_for;
591 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
592 list);
593 len += snprintf(buf + len, size - len,
594 " first-tid: %p sched: %i paused: %i\n",
595 tid, tid->sched, tid->paused);
596 }
597 done_for:
598 spin_unlock_bh(&txq->axq_lock);
599 }
600
601done:
602 if (len > size)
603 len = size;
604
605 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
606 kfree(buf);
607
608 return retval;
609}
610
611static ssize_t read_file_stations(struct file *file, char __user *user_buf,
612 size_t count, loff_t *ppos)
613{
614 struct ath_softc *sc = file->private_data;
615 char *buf;
616 unsigned int len = 0, size = 64000;
617 struct ath_node *an = NULL;
618 ssize_t retval = 0;
619 int q;
620
621 buf = kzalloc(size, GFP_KERNEL);
622 if (buf == NULL)
623 return -ENOMEM;
624
625 len += snprintf(buf + len, size - len,
626 "Stations:\n"
627 " tid: addr sched paused buf_q-empty an ac\n"
628 " ac: addr sched tid_q-empty txq\n");
629
630 spin_lock(&sc->nodes_lock);
631 list_for_each_entry(an, &sc->nodes, list) {
632 len += snprintf(buf + len, size - len,
633 "%pM\n", an->sta->addr);
634 if (len >= size)
635 goto done;
636
637 for (q = 0; q < WME_NUM_TID; q++) {
638 struct ath_atx_tid *tid = &(an->tid[q]);
639 len += snprintf(buf + len, size - len,
640 " tid: %p %s %s %i %p %p\n",
641 tid, tid->sched ? "sched" : "idle",
642 tid->paused ? "paused" : "running",
643 list_empty(&tid->buf_q),
644 tid->an, tid->ac);
645 if (len >= size)
646 goto done;
647 }
648
649 for (q = 0; q < WME_NUM_AC; q++) {
650 struct ath_atx_ac *ac = &(an->ac[q]);
651 len += snprintf(buf + len, size - len,
652 " ac: %p %s %i %p\n",
653 ac, ac->sched ? "sched" : "idle",
654 list_empty(&ac->tid_q), ac->txq);
655 if (len >= size)
656 goto done;
657 }
658 }
659
660done:
661 spin_unlock(&sc->nodes_lock);
662 if (len > size)
663 len = size;
664
665 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
666 kfree(buf);
667
668 return retval;
669}
670
671static ssize_t read_file_misc(struct file *file, char __user *user_buf,
672 size_t count, loff_t *ppos)
673{
674 struct ath_softc *sc = file->private_data;
675 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
676 struct ath_hw *ah = sc->sc_ah;
677 struct ieee80211_hw *hw = sc->hw;
678 char *buf;
679 unsigned int len = 0, size = 8000;
680 ssize_t retval = 0;
681 const char *tmp;
682 unsigned int reg;
683 struct ath9k_vif_iter_data iter_data;
684
685 ath9k_calculate_iter_data(hw, NULL, &iter_data);
686
687 buf = kzalloc(size, GFP_KERNEL);
688 if (buf == NULL)
689 return -ENOMEM;
690
691 switch (sc->sc_ah->opmode) {
692 case NL80211_IFTYPE_ADHOC:
693 tmp = "ADHOC";
694 break;
695 case NL80211_IFTYPE_MESH_POINT:
696 tmp = "MESH";
697 break;
698 case NL80211_IFTYPE_AP:
699 tmp = "AP";
700 break;
701 case NL80211_IFTYPE_STATION:
702 tmp = "STATION";
703 break;
704 default:
705 tmp = "???";
706 break;
707 }
708
709 len += snprintf(buf + len, size - len,
710 "curbssid: %pM\n"
711 "OP-Mode: %s(%i)\n"
712 "Beacon-Timer-Register: 0x%x\n",
713 common->curbssid,
714 tmp, (int)(sc->sc_ah->opmode),
715 REG_READ(ah, AR_BEACON_PERIOD));
716
717 reg = REG_READ(ah, AR_TIMER_MODE);
718 len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
719 reg);
720 if (reg & AR_TBTT_TIMER_EN)
721 len += snprintf(buf + len, size - len, "TBTT ");
722 if (reg & AR_DBA_TIMER_EN)
723 len += snprintf(buf + len, size - len, "DBA ");
724 if (reg & AR_SWBA_TIMER_EN)
725 len += snprintf(buf + len, size - len, "SWBA ");
726 if (reg & AR_HCF_TIMER_EN)
727 len += snprintf(buf + len, size - len, "HCF ");
728 if (reg & AR_TIM_TIMER_EN)
729 len += snprintf(buf + len, size - len, "TIM ");
730 if (reg & AR_DTIM_TIMER_EN)
731 len += snprintf(buf + len, size - len, "DTIM ");
732 len += snprintf(buf + len, size - len, ")\n");
733
734 reg = sc->sc_ah->imask;
735 len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
736 if (reg & ATH9K_INT_SWBA)
737 len += snprintf(buf + len, size - len, "SWBA ");
738 if (reg & ATH9K_INT_BMISS)
739 len += snprintf(buf + len, size - len, "BMISS ");
740 if (reg & ATH9K_INT_CST)
741 len += snprintf(buf + len, size - len, "CST ");
742 if (reg & ATH9K_INT_RX)
743 len += snprintf(buf + len, size - len, "RX ");
744 if (reg & ATH9K_INT_RXHP)
745 len += snprintf(buf + len, size - len, "RXHP ");
746 if (reg & ATH9K_INT_RXLP)
747 len += snprintf(buf + len, size - len, "RXLP ");
748 if (reg & ATH9K_INT_BB_WATCHDOG)
749 len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
750 /* there are other IRQs if one wanted to add them. */
751 len += snprintf(buf + len, size - len, ")\n");
752
753 len += snprintf(buf + len, size - len,
754 "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
755 " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
756 iter_data.naps, iter_data.nstations, iter_data.nmeshes,
757 iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
758 sc->nvifs, sc->nbcnvifs);
759
760 len += snprintf(buf + len, size - len,
761 "Calculated-BSSID-Mask: %pM\n",
762 iter_data.mask);
621 763
622 if (len > size) 764 if (len > size)
623 len = size; 765 len = size;
@@ -629,9 +771,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
629} 771}
630 772
631void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, 773void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
632 struct ath_tx_status *ts) 774 struct ath_tx_status *ts, struct ath_txq *txq)
633{ 775{
634 int qnum = skb_get_queue_mapping(bf->bf_mpdu); 776 int qnum = txq->axq_qnum;
635 777
636 TX_STAT_INC(qnum, tx_pkts_all); 778 TX_STAT_INC(qnum, tx_pkts_all);
637 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; 779 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
@@ -666,6 +808,20 @@ static const struct file_operations fops_xmit = {
666 .llseek = default_llseek, 808 .llseek = default_llseek,
667}; 809};
668 810
811static const struct file_operations fops_stations = {
812 .read = read_file_stations,
813 .open = ath9k_debugfs_open,
814 .owner = THIS_MODULE,
815 .llseek = default_llseek,
816};
817
818static const struct file_operations fops_misc = {
819 .read = read_file_misc,
820 .open = ath9k_debugfs_open,
821 .owner = THIS_MODULE,
822 .llseek = default_llseek,
823};
824
669static ssize_t read_file_recv(struct file *file, char __user *user_buf, 825static ssize_t read_file_recv(struct file *file, char __user *user_buf,
670 size_t count, loff_t *ppos) 826 size_t count, loff_t *ppos)
671{ 827{
@@ -903,6 +1059,14 @@ int ath9k_init_debug(struct ath_hw *ah)
903 sc, &fops_xmit)) 1059 sc, &fops_xmit))
904 goto err; 1060 goto err;
905 1061
1062 if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy,
1063 sc, &fops_stations))
1064 goto err;
1065
1066 if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy,
1067 sc, &fops_misc))
1068 goto err;
1069
906 if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, 1070 if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
907 sc, &fops_recv)) 1071 sc, &fops_recv))
908 goto err; 1072 goto err;
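
The PR/PRX/PRQLE macros and the new debugfs readers above all follow the same bounded-append pattern: keep appending with snprintf(buf + len, size - len, ...) and bail out as soon as len reaches size. A small user-space sketch of that pattern (outside the kernel, so plain stdio is used):

/*
 * Bounded append into a fixed buffer, stopping once it is full.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64];
	unsigned int len = 0, size = sizeof(buf);
	int i;

	for (i = 0; i < 32; i++) {
		len += snprintf(buf + len, size - len, "line %d\n", i);
		if (len >= size)
			break;		/* buffer full, stop appending */
	}
	if (len > size)
		len = size;		/* snprintf reports would-be length */

	fwrite(buf, 1, strlen(buf), stdout);
	return 0;
}
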
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 1e5078bd034..59338de0ce1 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -89,7 +89,8 @@ struct ath_interrupt_stats {
89 * @queued: Total MPDUs (non-aggr) queued 89 * @queued: Total MPDUs (non-aggr) queued
90 * @completed: Total MPDUs (non-aggr) completed 90 * @completed: Total MPDUs (non-aggr) completed
91 * @a_aggr: Total no. of aggregates queued 91 * @a_aggr: Total no. of aggregates queued
92 * @a_queued: Total AMPDUs queued 92 * @a_queued_hw: Total AMPDUs queued to hardware
93 * @a_queued_sw: Total AMPDUs queued to software queues
93 * @a_completed: Total AMPDUs completed 94 * @a_completed: Total AMPDUs completed
94 * @a_retries: No. of AMPDUs retried (SW) 95 * @a_retries: No. of AMPDUs retried (SW)
95 * @a_xretries: No. of AMPDUs dropped due to xretries 96 * @a_xretries: No. of AMPDUs dropped due to xretries
@@ -102,6 +103,9 @@ struct ath_interrupt_stats {
102 * @desc_cfg_err: Descriptor configuration errors 103 * @desc_cfg_err: Descriptor configuration errors
103 * @data_urn: TX data underrun errors 104 * @data_urn: TX data underrun errors
104 * @delim_urn: TX delimiter underrun errors 105 * @delim_urn: TX delimiter underrun errors
106 * @puttxbuf: Number of times hardware was given txbuf to write.
107 * @txstart: Number of times hardware was told to start tx.
108 * @txprocdesc: Number of times tx descriptor was processed
105 */ 109 */
106struct ath_tx_stats { 110struct ath_tx_stats {
107 u32 tx_pkts_all; 111 u32 tx_pkts_all;
@@ -109,7 +113,8 @@ struct ath_tx_stats {
109 u32 queued; 113 u32 queued;
110 u32 completed; 114 u32 completed;
111 u32 a_aggr; 115 u32 a_aggr;
112 u32 a_queued; 116 u32 a_queued_hw;
117 u32 a_queued_sw;
113 u32 a_completed; 118 u32 a_completed;
114 u32 a_retries; 119 u32 a_retries;
115 u32 a_xretries; 120 u32 a_xretries;
@@ -119,6 +124,9 @@ struct ath_tx_stats {
119 u32 desc_cfg_err; 124 u32 desc_cfg_err;
120 u32 data_underrun; 125 u32 data_underrun;
121 u32 delim_underrun; 126 u32 delim_underrun;
127 u32 puttxbuf;
128 u32 txstart;
129 u32 txprocdesc;
122}; 130};
123 131
124/** 132/**
@@ -167,7 +175,7 @@ int ath9k_init_debug(struct ath_hw *ah);
167 175
168void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 176void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, 177void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
170 struct ath_tx_status *ts); 178 struct ath_tx_status *ts, struct ath_txq *txq);
171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); 179void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
172 180
173#else 181#else
@@ -184,7 +192,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
184 192
185static inline void ath_debug_stat_tx(struct ath_softc *sc, 193static inline void ath_debug_stat_tx(struct ath_softc *sc,
186 struct ath_buf *bf, 194 struct ath_buf *bf,
187 struct ath_tx_status *ts) 195 struct ath_tx_status *ts,
196 struct ath_txq *txq)
188{ 197{
189} 198}
190 199
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index d0516315957..8c18bed3a55 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
89 return false; 89 return false;
90} 90}
91 91
92void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
93 int eep_start_loc, int size)
94{
95 int i = 0, j, addr;
96 u32 addrdata[8];
97 u32 data[8];
98
99 for (addr = 0; addr < size; addr++) {
100 addrdata[i] = AR5416_EEPROM_OFFSET +
101 ((addr + eep_start_loc) << AR5416_EEPROM_S);
102 i++;
103 if (i == 8) {
104 REG_READ_MULTI(ah, addrdata, data, i);
105
106 for (j = 0; j < i; j++) {
107 *eep_data = data[j];
108 eep_data++;
109 }
110 i = 0;
111 }
112 }
113
114 if (i != 0) {
115 REG_READ_MULTI(ah, addrdata, data, i);
116
117 for (j = 0; j < i; j++) {
118 *eep_data = data[j];
119 eep_data++;
120 }
121 }
122}
123
92bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) 124bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
93{ 125{
94 return common->bus_ops->eeprom_read(common, off, data); 126 return common->bus_ops->eeprom_read(common, off, data);
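
ath9k_hw_usb_gen_fill_eeprom above batches EEPROM offsets eight at a time, issues one multi-register read per batch, and flushes whatever remains after the loop. The batching shape is sketched below with a stubbed read_multi() in place of REG_READ_MULTI:

/*
 * Batch-and-flush sketch: gather up to 8 addresses, read them in one
 * call, then drain the partial batch left after the loop.
 */
#include <stdint.h>
#include <stdio.h>

static void read_multi(const uint32_t *addr, uint32_t *val, int count)
{
	int j;

	/* Fake "read": echo the address back as the value. */
	for (j = 0; j < count; j++)
		val[j] = addr[j];
}

int main(void)
{
	uint32_t addrbuf[8], valbuf[8];
	uint16_t out[20];
	int i = 0, j, n = 0, addr, size = 20;

	for (addr = 0; addr < size; addr++) {
		addrbuf[i++] = 0x2000 + (addr << 2);
		if (i == 8) {
			read_multi(addrbuf, valbuf, i);
			for (j = 0; j < i; j++)
				out[n++] = (uint16_t)valbuf[j];
			i = 0;
		}
	}
	if (i != 0) {			/* flush the partial batch */
		read_multi(addrbuf, valbuf, i);
		for (j = 0; j < i; j++)
			out[n++] = (uint16_t)valbuf[j];
	}

	printf("filled %d words, last = 0x%04x\n", n, (unsigned)out[n - 1]);
	return 0;
}
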
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 58e2ddc927a..bd82447f5b7 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -665,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
665bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, 665bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
666 u16 *indexL, u16 *indexR); 666 u16 *indexL, u16 *indexR);
667bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data); 667bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
668void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
669 int eep_start_loc, int size);
668void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 670void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
669 u8 *pVpdList, u16 numIntercepts, 671 u8 *pVpdList, u16 numIntercepts,
670 u8 *pRetVpdList); 672 u8 *pRetVpdList);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index fbdff7e4795..bc77a308c90 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -27,19 +27,13 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
27 return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); 27 return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
28} 28}
29 29
30static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
31{
32#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 30#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
31
32static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
33{
33 struct ath_common *common = ath9k_hw_common(ah); 34 struct ath_common *common = ath9k_hw_common(ah);
34 u16 *eep_data = (u16 *)&ah->eeprom.map4k; 35 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
35 int addr, eep_start_loc = 0; 36 int addr, eep_start_loc = 64;
36
37 eep_start_loc = 64;
38
39 if (!ath9k_hw_use_flash(ah)) {
40 ath_dbg(common, ATH_DBG_EEPROM,
41 "Reading from EEPROM, not flash\n");
42 }
43 37
44 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 38 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
45 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { 39 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
@@ -51,9 +45,34 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
51 } 45 }
52 46
53 return true; 47 return true;
54#undef SIZE_EEPROM_4K
55} 48}
56 49
50static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
51{
52 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
53
54 ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
55
56 return true;
57}
58
59static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
60{
61 struct ath_common *common = ath9k_hw_common(ah);
62
63 if (!ath9k_hw_use_flash(ah)) {
64 ath_dbg(common, ATH_DBG_EEPROM,
65 "Reading from EEPROM, not flash\n");
66 }
67
68 if (common->bus_ops->ath_bus_type == ATH_USB)
69 return __ath9k_hw_usb_4k_fill_eeprom(ah);
70 else
71 return __ath9k_hw_4k_fill_eeprom(ah);
72}
73
74#undef SIZE_EEPROM_4K
75
57static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) 76static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
58{ 77{
59#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 78#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
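Each map's fill routine now has a memory-mapped variant and a USB variant behind a thin wrapper that dispatches on common->bus_ops->ath_bus_type. Boiled down to a bus-agnostic sketch, with invented names standing in for the __ath9k_hw_*_fill_eeprom() pairs:

#include <stdbool.h>

enum bus_type { BUS_MMIO, BUS_USB };

/* Word-at-a-time reads straight through the NVRAM/EEPROM ops. */
static bool fill_mmio(void) { return true; }

/* One batched multi-register read per group of words (the USB/HTC path). */
static bool fill_usb(void) { return true; }

/* Wrapper in the shape of ath9k_hw_*_fill_eeprom(): pick the path per bus. */
bool fill_eeprom(enum bus_type bus)
{
        return bus == BUS_USB ? fill_usb() : fill_mmio();
}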
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 9b6bc8a953b..8cd8333cc08 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,7 +17,7 @@
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h" 18#include "ar9002_phy.h"
19 19
20#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16)) 20#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
21 21
22static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) 22static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
23{ 23{
@@ -29,25 +29,15 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
29 return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; 29 return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
30} 30}
31 31
32static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) 32static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
33{ 33{
34 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 34 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
35 struct ath_common *common = ath9k_hw_common(ah); 35 struct ath_common *common = ath9k_hw_common(ah);
36 u16 *eep_data; 36 u16 *eep_data;
37 int addr, eep_start_loc; 37 int addr, eep_start_loc = AR9287_EEP_START_LOC;
38 eep_data = (u16 *)eep; 38 eep_data = (u16 *)eep;
39 39
40 if (common->bus_ops->ath_bus_type == ATH_USB) 40 for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
41 eep_start_loc = AR9287_HTC_EEP_START_LOC;
42 else
43 eep_start_loc = AR9287_EEP_START_LOC;
44
45 if (!ath9k_hw_use_flash(ah)) {
46 ath_dbg(common, ATH_DBG_EEPROM,
47 "Reading from EEPROM, not flash\n");
48 }
49
50 for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
51 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, 41 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
52 eep_data)) { 42 eep_data)) {
53 ath_dbg(common, ATH_DBG_EEPROM, 43 ath_dbg(common, ATH_DBG_EEPROM,
@@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
60 return true; 50 return true;
61} 51}
62 52
53static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
54{
55 u16 *eep_data = (u16 *)&ah->eeprom.map9287;
56
57 ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
58 AR9287_HTC_EEP_START_LOC,
59 SIZE_EEPROM_AR9287);
60 return true;
61}
62
63static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
64{
65 struct ath_common *common = ath9k_hw_common(ah);
66
67 if (!ath9k_hw_use_flash(ah)) {
68 ath_dbg(common, ATH_DBG_EEPROM,
69 "Reading from EEPROM, not flash\n");
70 }
71
72 if (common->bus_ops->ath_bus_type == ATH_USB)
73 return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
74 else
75 return __ath9k_hw_ar9287_fill_eeprom(ah);
76}
77
63static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) 78static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
64{ 79{
65 u32 sum = 0, el, integer; 80 u32 sum = 0, el, integer;
@@ -86,7 +101,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
86 need_swap = true; 101 need_swap = true;
87 eepdata = (u16 *)(&ah->eeprom); 102 eepdata = (u16 *)(&ah->eeprom);
88 103
89 for (addr = 0; addr < NUM_EEP_WORDS; addr++) { 104 for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
90 temp = swab16(*eepdata); 105 temp = swab16(*eepdata);
91 *eepdata = temp; 106 *eepdata = temp;
92 eepdata++; 107 eepdata++;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 749a9360866..fccd87df730 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
86 return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF); 86 return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
87} 87}
88 88
89static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
90{
91#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) 89#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
90
91static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
92{
92 struct ath_common *common = ath9k_hw_common(ah); 93 struct ath_common *common = ath9k_hw_common(ah);
93 u16 *eep_data = (u16 *)&ah->eeprom.def; 94 u16 *eep_data = (u16 *)&ah->eeprom.def;
94 int addr, ar5416_eep_start_loc = 0x100; 95 int addr, ar5416_eep_start_loc = 0x100;
@@ -103,9 +104,34 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
103 eep_data++; 104 eep_data++;
104 } 105 }
105 return true; 106 return true;
106#undef SIZE_EEPROM_DEF
107} 107}
108 108
109static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
110{
111 u16 *eep_data = (u16 *)&ah->eeprom.def;
112
113 ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
114 0x100, SIZE_EEPROM_DEF);
115 return true;
116}
117
118static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
119{
120 struct ath_common *common = ath9k_hw_common(ah);
121
122 if (!ath9k_hw_use_flash(ah)) {
123 ath_dbg(common, ATH_DBG_EEPROM,
124 "Reading from EEPROM, not flash\n");
125 }
126
127 if (common->bus_ops->ath_bus_type == ATH_USB)
128 return __ath9k_hw_usb_def_fill_eeprom(ah);
129 else
130 return __ath9k_hw_def_fill_eeprom(ah);
131}
132
133#undef SIZE_EEPROM_DEF
134
109static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) 135static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
110{ 136{
111 struct ar5416_eeprom_def *eep = 137 struct ar5416_eeprom_def *eep =
@@ -221,9 +247,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
221 } 247 }
222 248
223 /* Enable fixup for AR_AN_TOP2 if necessary */ 249 /* Enable fixup for AR_AN_TOP2 if necessary */
224 if (AR_SREV_9280_20_OR_LATER(ah) && 250 if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
225 (eep->baseEepHeader.version & 0xff) > 0x0a && 251 ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
226 eep->baseEepHeader.pwdclkind == 0) 252 (eep->baseEepHeader.pwdclkind == 0))
227 ah->need_an_top2_fixup = 1; 253 ah->need_an_top2_fixup = 1;
228 254
229 if ((common->bus_ops->ath_bus_type == ATH_USB) && 255 if ((common->bus_ops->ath_bus_type == ATH_USB) &&
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 13376406924..fb4f17a5183 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -201,8 +201,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
201 201
202void ath9k_rfkill_poll_state(struct ieee80211_hw *hw) 202void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
203{ 203{
204 struct ath_wiphy *aphy = hw->priv; 204 struct ath_softc *sc = hw->priv;
205 struct ath_softc *sc = aphy->sc;
206 bool blocked = !!ath_is_rfkill_set(sc); 205 bool blocked = !!ath_is_rfkill_set(sc);
207 206
208 wiphy_rfkill_set_hw_state(hw->wiphy, blocked); 207 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 780ac5eac50..0cb504d7b8c 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -366,7 +366,7 @@ struct ath9k_htc_priv {
366 u16 seq_no; 366 u16 seq_no;
367 u32 bmiss_cnt; 367 u32 bmiss_cnt;
368 368
369 struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS]; 369 struct ath9k_hw_cal_data caldata;
370 370
371 spinlock_t beacon_lock; 371 spinlock_t beacon_lock;
372 372
@@ -460,7 +460,6 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
460void ath9k_ps_work(struct work_struct *work); 460void ath9k_ps_work(struct work_struct *work);
461bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 461bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
462 enum ath9k_power_mode mode); 462 enum ath9k_power_mode mode);
463void ath_update_txpow(struct ath9k_htc_priv *priv);
464 463
465void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 464void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
466void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw); 465void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index fe70f67aa08..7e630a81b45 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -389,7 +389,8 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
389 ret, ah->curchan->channel); 389 ret, ah->curchan->channel);
390 } 390 }
391 391
392 ath_update_txpow(priv); 392 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
393 &priv->curtxpow);
393 394
394 /* Start RX */ 395 /* Start RX */
395 WMI_CMD(WMI_START_RECV_CMDID); 396 WMI_CMD(WMI_START_RECV_CMDID);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0352f0994ca..a7bc26d1bd6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -294,6 +294,34 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
294 return be32_to_cpu(val); 294 return be32_to_cpu(val);
295} 295}
296 296
297static void ath9k_multi_regread(void *hw_priv, u32 *addr,
298 u32 *val, u16 count)
299{
300 struct ath_hw *ah = (struct ath_hw *) hw_priv;
301 struct ath_common *common = ath9k_hw_common(ah);
302 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
303 __be32 tmpaddr[8];
304 __be32 tmpval[8];
305 int i, ret;
306
307 for (i = 0; i < count; i++) {
308 tmpaddr[i] = cpu_to_be32(addr[i]);
309 }
310
311 ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
312 (u8 *)tmpaddr , sizeof(u32) * count,
313 (u8 *)tmpval, sizeof(u32) * count,
314 100);
315 if (unlikely(ret)) {
316 ath_dbg(common, ATH_DBG_WMI,
317 "Multiple REGISTER READ FAILED (count: %d)\n", count);
318 }
319
320 for (i = 0; i < count; i++) {
321 val[i] = be32_to_cpu(tmpval[i]);
322 }
323}
324
297static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) 325static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
298{ 326{
299 struct ath_hw *ah = (struct ath_hw *) hw_priv; 327 struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -404,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
404 432
405static const struct ath_ops ath9k_common_ops = { 433static const struct ath_ops ath9k_common_ops = {
406 .read = ath9k_regread, 434 .read = ath9k_regread,
435 .multi_read = ath9k_multi_regread,
407 .write = ath9k_regwrite, 436 .write = ath9k_regwrite,
408 .enable_write_buffer = ath9k_enable_regwrite_buffer, 437 .enable_write_buffer = ath9k_enable_regwrite_buffer,
409 .write_flush = ath9k_regwrite_flush, 438 .write_flush = ath9k_regwrite_flush,
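The ath9k_multi_regread() callback registered above is reached through the REG_READ_MULTI() macro that this series adds to hw.h, and the USB EEPROM helpers earlier in the diff are its first users. A stripped-down sketch of that ops-table indirection, with simplified stand-in types and a dummy backend in place of the WMI transport:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct ath_ops / ath9k_hw_common(ah)->ops. */
struct reg_ops {
        uint32_t (*read)(void *priv, uint32_t reg);
        void (*multi_read)(void *priv, uint32_t *addr, uint32_t *val,
                           uint16_t count);
};

#define REG_READ(ops, priv, reg) ((ops)->read((priv), (reg)))
#define REG_READ_MULTI(ops, priv, addr, val, cnt) \
        ((ops)->multi_read((priv), (addr), (val), (cnt)))

/* Dummy backend standing in for the WMI transport: echo address as value. */
static uint32_t dummy_read(void *priv, uint32_t reg)
{
        (void)priv;
        return reg;
}

static void dummy_multi_read(void *priv, uint32_t *addr, uint32_t *val,
                             uint16_t count)
{
        for (uint16_t i = 0; i < count; i++)
                val[i] = dummy_read(priv, addr[i]);
}

static const struct reg_ops ops = {
        .read       = dummy_read,
        .multi_read = dummy_multi_read,
};

int main(void)
{
        uint32_t addr[3] = { 0x10, 0x14, 0x18 }, val[3];

        REG_READ_MULTI(&ops, NULL, addr, val, 3);
        printf("%#" PRIx32 " %#" PRIx32 " %#" PRIx32 "\n",
               val[0], val[1], val[2]);
        return 0;
}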
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 6bb59958f71..50fde0e1059 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root;
24/* Utilities */ 24/* Utilities */
25/*************/ 25/*************/
26 26
27void ath_update_txpow(struct ath9k_htc_priv *priv)
28{
29 struct ath_hw *ah = priv->ah;
30
31 if (priv->curtxpow != priv->txpowlimit) {
32 ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
33 /* read back in case value is clamped */
34 priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
35 }
36}
37
38/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */ 27/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
39static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, 28static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
40 struct ath9k_channel *ichan) 29 struct ath9k_channel *ichan)
@@ -121,7 +110,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
121 struct ath_hw *ah = priv->ah; 110 struct ath_hw *ah = priv->ah;
122 struct ath_common *common = ath9k_hw_common(ah); 111 struct ath_common *common = ath9k_hw_common(ah);
123 struct ieee80211_channel *channel = priv->hw->conf.channel; 112 struct ieee80211_channel *channel = priv->hw->conf.channel;
124 struct ath9k_hw_cal_data *caldata; 113 struct ath9k_hw_cal_data *caldata = NULL;
125 enum htc_phymode mode; 114 enum htc_phymode mode;
126 __be16 htc_mode; 115 __be16 htc_mode;
127 u8 cmd_rsp; 116 u8 cmd_rsp;
@@ -139,7 +128,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
139 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); 128 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
140 WMI_CMD(WMI_STOP_RECV_CMDID); 129 WMI_CMD(WMI_STOP_RECV_CMDID);
141 130
142 caldata = &priv->caldata[channel->hw_value]; 131 caldata = &priv->caldata;
143 ret = ath9k_hw_reset(ah, ah->curchan, caldata, false); 132 ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
144 if (ret) { 133 if (ret) {
145 ath_err(common, 134 ath_err(common,
@@ -147,7 +136,8 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
147 channel->center_freq, ret); 136 channel->center_freq, ret);
148 } 137 }
149 138
150 ath_update_txpow(priv); 139 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
140 &priv->curtxpow);
151 141
152 WMI_CMD(WMI_START_RECV_CMDID); 142 WMI_CMD(WMI_START_RECV_CMDID);
153 ath9k_host_rx_init(priv); 143 ath9k_host_rx_init(priv);
@@ -179,7 +169,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
179 struct ieee80211_conf *conf = &common->hw->conf; 169 struct ieee80211_conf *conf = &common->hw->conf;
180 bool fastcc; 170 bool fastcc;
181 struct ieee80211_channel *channel = hw->conf.channel; 171 struct ieee80211_channel *channel = hw->conf.channel;
182 struct ath9k_hw_cal_data *caldata; 172 struct ath9k_hw_cal_data *caldata = NULL;
183 enum htc_phymode mode; 173 enum htc_phymode mode;
184 __be16 htc_mode; 174 __be16 htc_mode;
185 u8 cmd_rsp; 175 u8 cmd_rsp;
@@ -202,7 +192,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
202 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), 192 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
203 fastcc); 193 fastcc);
204 194
205 caldata = &priv->caldata[channel->hw_value]; 195 if (!fastcc)
196 caldata = &priv->caldata;
206 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); 197 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
207 if (ret) { 198 if (ret) {
208 ath_err(common, 199 ath_err(common,
@@ -211,7 +202,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
211 goto err; 202 goto err;
212 } 203 }
213 204
214 ath_update_txpow(priv); 205 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
206 &priv->curtxpow);
215 207
216 WMI_CMD(WMI_START_RECV_CMDID); 208 WMI_CMD(WMI_START_RECV_CMDID);
217 if (ret) 209 if (ret)
@@ -987,7 +979,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
987 return ret; 979 return ret;
988 } 980 }
989 981
990 ath_update_txpow(priv); 982 ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
983 &priv->curtxpow);
991 984
992 mode = ath9k_htc_get_curmode(priv, init_channel); 985 mode = ath9k_htc_get_curmode(priv, init_channel);
993 htc_mode = cpu_to_be16(mode); 986 htc_mode = cpu_to_be16(mode);
@@ -1051,6 +1044,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1051 cancel_work_sync(&priv->fatal_work); 1044 cancel_work_sync(&priv->fatal_work);
1052 cancel_work_sync(&priv->ps_work); 1045 cancel_work_sync(&priv->ps_work);
1053 cancel_delayed_work_sync(&priv->ath9k_led_blink_work); 1046 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1047 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1054 ath9k_led_stop_brightness(priv); 1048 ath9k_led_stop_brightness(priv);
1055 1049
1056 mutex_lock(&priv->mutex); 1050 mutex_lock(&priv->mutex);
@@ -1252,7 +1246,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1252 1246
1253 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1247 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1254 priv->txpowlimit = 2 * conf->power_level; 1248 priv->txpowlimit = 2 * conf->power_level;
1255 ath_update_txpow(priv); 1249 ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
1250 priv->txpowlimit, &priv->curtxpow);
1256 } 1251 }
1257 1252
1258 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1253 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -1557,7 +1552,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1557 struct ieee80211_vif *vif, 1552 struct ieee80211_vif *vif,
1558 enum ieee80211_ampdu_mlme_action action, 1553 enum ieee80211_ampdu_mlme_action action,
1559 struct ieee80211_sta *sta, 1554 struct ieee80211_sta *sta,
1560 u16 tid, u16 *ssn) 1555 u16 tid, u16 *ssn, u8 buf_size)
1561{ 1556{
1562 struct ath9k_htc_priv *priv = hw->priv; 1557 struct ath9k_htc_priv *priv = hw->priv;
1563 struct ath9k_htc_sta *ista; 1558 struct ath9k_htc_sta *ista;
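The HTC call sites above now go through ath9k_cmn_update_txpow() instead of a per-driver ath_update_txpow(). The shared helper's body is not part of this diff, but judging from the removed helpers (here and in main.c further down) and from its call signature it presumably reduces to the sketch below; the struct and the clamp value are stand-ins so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the hardware handle and its regulatory state. */
struct hw {
        uint16_t power_limit; /* what the hardware actually accepted */
};

/* Pretend the hardware clamps requests to 40 (half-dBm units). */
static void hw_set_txpowerlimit(struct hw *ah, uint16_t limit)
{
        ah->power_limit = limit > 40 ? 40 : limit;
}

/* Presumed shape of ath9k_cmn_update_txpow(), inferred from the removed
 * per-driver helpers and the call sites: program the new limit only when
 * it changed, then read back the possibly clamped value. */
static void cmn_update_txpow(struct hw *ah, uint16_t cur_txpow,
                             uint16_t new_txpow, uint16_t *txpower)
{
        if (cur_txpow != new_txpow) {
                hw_set_txpowerlimit(ah, new_txpow);
                *txpower = ah->power_limit;
        }
}

int main(void)
{
        struct hw ah = { .power_limit = 0 };
        uint16_t curtxpow = 0;

        cmn_update_txpow(&ah, curtxpow, 63, &curtxpow);
        printf("requested 63, hardware kept %u\n", (unsigned)curtxpow); /* 40 */
        return 0;
}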
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9f01e50d5cd..9a3438174f8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
495 if (ah->hw_version.devid == AR5416_AR9100_DEVID) 495 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
496 ah->hw_version.macVersion = AR_SREV_VERSION_9100; 496 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
497 497
498 ath9k_hw_read_revisions(ah);
499
500 /*
501 * Read back AR_WA into a permanent copy and set bits 14 and 17.
502 * We need to do this to avoid RMW of this register. We cannot
503 * read the reg when chip is asleep.
504 */
505 ah->WARegVal = REG_READ(ah, AR_WA);
506 ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
507 AR_WA_ASPM_TIMER_BASED_DISABLE);
508
498 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 509 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
499 ath_err(common, "Couldn't reset chip\n"); 510 ath_err(common, "Couldn't reset chip\n");
500 return -EIO; 511 return -EIO;
@@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
563 574
564 ath9k_hw_init_mode_regs(ah); 575 ath9k_hw_init_mode_regs(ah);
565 576
566 /*
567 * Read back AR_WA into a permanent copy and set bits 14 and 17.
568 * We need to do this to avoid RMW of this register. We cannot
569 * read the reg when chip is asleep.
570 */
571 ah->WARegVal = REG_READ(ah, AR_WA);
572 ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
573 AR_WA_ASPM_TIMER_BASED_DISABLE);
574 577
575 if (ah->is_pciexpress) 578 if (ah->is_pciexpress)
576 ath9k_hw_configpcipowersave(ah, 0, 0); 579 ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -668,14 +671,51 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
668 REGWRITE_BUFFER_FLUSH(ah); 671 REGWRITE_BUFFER_FLUSH(ah);
669} 672}
670 673
674unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
675{
676 REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
677 udelay(100);
678 REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
679
680 while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
681 udelay(100);
682
683 return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
684}
685EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
686
687#define DPLL2_KD_VAL 0x3D
688#define DPLL2_KI_VAL 0x06
689#define DPLL3_PHASE_SHIFT_VAL 0x1
690
671static void ath9k_hw_init_pll(struct ath_hw *ah, 691static void ath9k_hw_init_pll(struct ath_hw *ah,
672 struct ath9k_channel *chan) 692 struct ath9k_channel *chan)
673{ 693{
674 u32 pll; 694 u32 pll;
675 695
676 if (AR_SREV_9485(ah)) 696 if (AR_SREV_9485(ah)) {
697 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
698 REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
699
700 REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
701 AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
702
703 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
704 udelay(100);
705
677 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666); 706 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
678 707
708 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
709 AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
710 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
711 AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
712
713 REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
714 AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
715 REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
716 udelay(110);
717 }
718
679 pll = ath9k_hw_compute_pll_control(ah, chan); 719 pll = ath9k_hw_compute_pll_control(ah, chan);
680 720
681 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 721 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
@@ -1060,7 +1100,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1060 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1100 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1061 1101
1062 REG_WRITE(ah, AR_RTC_RESET, 0); 1102 REG_WRITE(ah, AR_RTC_RESET, 0);
1063 udelay(2);
1064 1103
1065 REGWRITE_BUFFER_FLUSH(ah); 1104 REGWRITE_BUFFER_FLUSH(ah);
1066 1105
@@ -1082,8 +1121,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1082 return false; 1121 return false;
1083 } 1122 }
1084 1123
1085 ath9k_hw_read_revisions(ah);
1086
1087 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); 1124 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1088} 1125}
1089 1126
@@ -1348,8 +1385,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1348 ath9k_hw_spur_mitigate_freq(ah, chan); 1385 ath9k_hw_spur_mitigate_freq(ah, chan);
1349 ah->eep_ops->set_board_values(ah, chan); 1386 ah->eep_ops->set_board_values(ah, chan);
1350 1387
1351 ath9k_hw_set_operating_mode(ah, ah->opmode);
1352
1353 ENABLE_REGWRITE_BUFFER(ah); 1388 ENABLE_REGWRITE_BUFFER(ah);
1354 1389
1355 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr)); 1390 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
@@ -1367,6 +1402,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1367 1402
1368 REGWRITE_BUFFER_FLUSH(ah); 1403 REGWRITE_BUFFER_FLUSH(ah);
1369 1404
1405 ath9k_hw_set_operating_mode(ah, ah->opmode);
1406
1370 r = ath9k_hw_rf_set_freq(ah, chan); 1407 r = ath9k_hw_rf_set_freq(ah, chan);
1371 if (r) 1408 if (r)
1372 return r; 1409 return r;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index ea9fde67064..ef79f4c876c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -70,6 +70,9 @@
70#define REG_READ(_ah, _reg) \ 70#define REG_READ(_ah, _reg) \
71 ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) 71 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
72 72
73#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
74 ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
75
73#define ENABLE_REGWRITE_BUFFER(_ah) \ 76#define ENABLE_REGWRITE_BUFFER(_ah) \
74 do { \ 77 do { \
75 if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \ 78 if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \
@@ -926,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
926void ath9k_hw_reset_tsf(struct ath_hw *ah); 929void ath9k_hw_reset_tsf(struct ath_hw *ah);
927void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 930void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
928void ath9k_hw_init_global_settings(struct ath_hw *ah); 931void ath9k_hw_init_global_settings(struct ath_hw *ah);
932unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
929void ath9k_hw_set11nmac2040(struct ath_hw *ah); 933void ath9k_hw_set11nmac2040(struct ath_hw *ah);
930void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 934void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
931void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 935void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index a033d01bf8a..f66c882a39e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -250,8 +250,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
250 struct regulatory_request *request) 250 struct regulatory_request *request)
251{ 251{
252 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 252 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
253 struct ath_wiphy *aphy = hw->priv; 253 struct ath_softc *sc = hw->priv;
254 struct ath_softc *sc = aphy->sc;
255 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah); 254 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
256 255
257 return ath_reg_notifier_apply(wiphy, request, reg); 256 return ath_reg_notifier_apply(wiphy, request, reg);
@@ -438,9 +437,10 @@ static int ath9k_init_queues(struct ath_softc *sc)
438 sc->config.cabqReadytime = ATH_CABQ_READY_TIME; 437 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
439 ath_cabq_update(sc); 438 ath_cabq_update(sc);
440 439
441 for (i = 0; i < WME_NUM_AC; i++) 440 for (i = 0; i < WME_NUM_AC; i++) {
442 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); 441 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
443 442 sc->tx.txq_map[i]->mac80211_qnum = i;
443 }
444 return 0; 444 return 0;
445} 445}
446 446
@@ -512,10 +512,8 @@ static void ath9k_init_misc(struct ath_softc *sc)
512 512
513 sc->beacon.slottime = ATH9K_SLOT_TIME_9; 513 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
514 514
515 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 515 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
516 sc->beacon.bslot[i] = NULL; 516 sc->beacon.bslot[i] = NULL;
517 sc->beacon.bslot_aphy[i] = NULL;
518 }
519 517
520 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) 518 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
521 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT; 519 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
@@ -533,6 +531,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
533 if (!ah) 531 if (!ah)
534 return -ENOMEM; 532 return -ENOMEM;
535 533
534 ah->hw = sc->hw;
536 ah->hw_version.devid = devid; 535 ah->hw_version.devid = devid;
537 ah->hw_version.subsysid = subsysid; 536 ah->hw_version.subsysid = subsysid;
538 sc->sc_ah = ah; 537 sc->sc_ah = ah;
@@ -550,10 +549,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
550 common->btcoex_enabled = ath9k_btcoex_enable == 1; 549 common->btcoex_enabled = ath9k_btcoex_enable == 1;
551 spin_lock_init(&common->cc_lock); 550 spin_lock_init(&common->cc_lock);
552 551
553 spin_lock_init(&sc->wiphy_lock);
554 spin_lock_init(&sc->sc_serial_rw); 552 spin_lock_init(&sc->sc_serial_rw);
555 spin_lock_init(&sc->sc_pm_lock); 553 spin_lock_init(&sc->sc_pm_lock);
556 mutex_init(&sc->mutex); 554 mutex_init(&sc->mutex);
555#ifdef CONFIG_ATH9K_DEBUGFS
556 spin_lock_init(&sc->nodes_lock);
557 INIT_LIST_HEAD(&sc->nodes);
558#endif
557 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); 559 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
558 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, 560 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
559 (unsigned long)sc); 561 (unsigned long)sc);
@@ -695,7 +697,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
695 const struct ath_bus_ops *bus_ops) 697 const struct ath_bus_ops *bus_ops)
696{ 698{
697 struct ieee80211_hw *hw = sc->hw; 699 struct ieee80211_hw *hw = sc->hw;
698 struct ath_wiphy *aphy = hw->priv;
699 struct ath_common *common; 700 struct ath_common *common;
700 struct ath_hw *ah; 701 struct ath_hw *ah;
701 int error = 0; 702 int error = 0;
@@ -750,10 +751,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
750 751
751 INIT_WORK(&sc->hw_check_work, ath_hw_check); 752 INIT_WORK(&sc->hw_check_work, ath_hw_check);
752 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); 753 INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
753 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); 754 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
754 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
755 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
756 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
757 755
758 ath_init_leds(sc); 756 ath_init_leds(sc);
759 ath_start_rfkill_poll(sc); 757 ath_start_rfkill_poll(sc);
@@ -805,7 +803,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
805void ath9k_deinit_device(struct ath_softc *sc) 803void ath9k_deinit_device(struct ath_softc *sc)
806{ 804{
807 struct ieee80211_hw *hw = sc->hw; 805 struct ieee80211_hw *hw = sc->hw;
808 int i = 0;
809 806
810 ath9k_ps_wakeup(sc); 807 ath9k_ps_wakeup(sc);
811 808
@@ -814,20 +811,10 @@ void ath9k_deinit_device(struct ath_softc *sc)
814 811
815 ath9k_ps_restore(sc); 812 ath9k_ps_restore(sc);
816 813
817 for (i = 0; i < sc->num_sec_wiphy; i++) {
818 struct ath_wiphy *aphy = sc->sec_wiphy[i];
819 if (aphy == NULL)
820 continue;
821 sc->sec_wiphy[i] = NULL;
822 ieee80211_unregister_hw(aphy->hw);
823 ieee80211_free_hw(aphy->hw);
824 }
825
826 ieee80211_unregister_hw(hw); 814 ieee80211_unregister_hw(hw);
827 ath_rx_cleanup(sc); 815 ath_rx_cleanup(sc);
828 ath_tx_cleanup(sc); 816 ath_tx_cleanup(sc);
829 ath9k_deinit_softc(sc); 817 ath9k_deinit_softc(sc);
830 kfree(sc->sec_wiphy);
831} 818}
832 819
833void ath_descdma_cleanup(struct ath_softc *sc, 820void ath_descdma_cleanup(struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 2915b11edef..5efc869d65f 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -690,17 +690,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
690 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; 690 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
691 691
692 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { 692 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
693 /*
694 * Treat these errors as mutually exclusive to avoid spurious
695 * extra error reports from the hardware. If a CRC error is
696 * reported, then decryption and MIC errors are irrelevant,
697 * the frame is going to be dropped either way
698 */
693 if (ads.ds_rxstatus8 & AR_CRCErr) 699 if (ads.ds_rxstatus8 & AR_CRCErr)
694 rs->rs_status |= ATH9K_RXERR_CRC; 700 rs->rs_status |= ATH9K_RXERR_CRC;
695 if (ads.ds_rxstatus8 & AR_PHYErr) { 701 else if (ads.ds_rxstatus8 & AR_PHYErr) {
696 rs->rs_status |= ATH9K_RXERR_PHY; 702 rs->rs_status |= ATH9K_RXERR_PHY;
697 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); 703 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
698 rs->rs_phyerr = phyerr; 704 rs->rs_phyerr = phyerr;
699 } 705 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
700 if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
701 rs->rs_status |= ATH9K_RXERR_DECRYPT; 706 rs->rs_status |= ATH9K_RXERR_DECRYPT;
702 if (ads.ds_rxstatus8 & AR_MichaelErr) 707 else if (ads.ds_rxstatus8 & AR_MichaelErr)
703 rs->rs_status |= ATH9K_RXERR_MIC; 708 rs->rs_status |= ATH9K_RXERR_MIC;
709
704 if (ads.ds_rxstatus8 & AR_KeyMiss) 710 if (ads.ds_rxstatus8 & AR_KeyMiss)
705 rs->rs_status |= ATH9K_RXERR_DECRYPT; 711 rs->rs_status |= ATH9K_RXERR_DECRYPT;
706 } 712 }
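The rewritten block reports at most one of the CRC, PHY, decrypt-CRC and Michael errors, in that priority order, while the key-miss bit is still folded in separately. The same priority restated as a small standalone helper, with simplified stand-ins for the AR_* status bits and ATH9K_RXERR_* flags:

#include <stdint.h>

/* Simplified stand-ins for the hardware status bits and driver error flags. */
enum {
        HW_CRC_ERR     = 1 << 0,
        HW_PHY_ERR     = 1 << 1,
        HW_DECRYPT_ERR = 1 << 2,
        HW_MIC_ERR     = 1 << 3,
        HW_KEY_MISS    = 1 << 4,
};

enum {
        RXERR_CRC     = 1 << 0,
        RXERR_PHY     = 1 << 1,
        RXERR_DECRYPT = 1 << 2,
        RXERR_MIC     = 1 << 3,
};

/* Report at most one frame error: a CRC error makes the decrypt/MIC
 * indications meaningless, so it takes precedence. Key-miss is orthogonal
 * and still mapped onto a decrypt error afterwards. */
uint32_t classify_rx_error(uint32_t hw_status)
{
        uint32_t status = 0;

        if (hw_status & HW_CRC_ERR)
                status |= RXERR_CRC;
        else if (hw_status & HW_PHY_ERR)
                status |= RXERR_PHY;
        else if (hw_status & HW_DECRYPT_ERR)
                status |= RXERR_DECRYPT;
        else if (hw_status & HW_MIC_ERR)
                status |= RXERR_MIC;

        if (hw_status & HW_KEY_MISS)
                status |= RXERR_DECRYPT;

        return status;
}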
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7512f97e8f4..04d58ae923b 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -639,6 +639,8 @@ enum ath9k_rx_filter {
639 ATH9K_RX_FILTER_PHYERR = 0x00000100, 639 ATH9K_RX_FILTER_PHYERR = 0x00000100,
640 ATH9K_RX_FILTER_MYBEACON = 0x00000200, 640 ATH9K_RX_FILTER_MYBEACON = 0x00000200,
641 ATH9K_RX_FILTER_COMP_BAR = 0x00000400, 641 ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
642 ATH9K_RX_FILTER_COMP_BA = 0x00000800,
643 ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
642 ATH9K_RX_FILTER_PSPOLL = 0x00004000, 644 ATH9K_RX_FILTER_PSPOLL = 0x00004000,
643 ATH9K_RX_FILTER_PHYRADAR = 0x00002000, 645 ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
644 ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000, 646 ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a09d15f7aa6..a71550049d8 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,20 +15,10 @@
15 */ 15 */
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/delay.h>
18#include "ath9k.h" 19#include "ath9k.h"
19#include "btcoex.h" 20#include "btcoex.h"
20 21
21static void ath_update_txpow(struct ath_softc *sc)
22{
23 struct ath_hw *ah = sc->sc_ah;
24
25 if (sc->curtxpow != sc->config.txpowlimit) {
26 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
27 /* read back in case value is clamped */
28 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
29 }
30}
31
32static u8 parse_mpdudensity(u8 mpdudensity) 22static u8 parse_mpdudensity(u8 mpdudensity)
33{ 23{
34 /* 24 /*
@@ -64,17 +54,19 @@ static u8 parse_mpdudensity(u8 mpdudensity)
64 } 54 }
65} 55}
66 56
67static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc, 57static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
68 struct ieee80211_hw *hw)
69{ 58{
70 struct ieee80211_channel *curchan = hw->conf.channel; 59 bool pending = false;
71 struct ath9k_channel *channel; 60
72 u8 chan_idx; 61 spin_lock_bh(&txq->axq_lock);
73 62
74 chan_idx = curchan->hw_value; 63 if (txq->axq_depth || !list_empty(&txq->axq_acq))
75 channel = &sc->sc_ah->channels[chan_idx]; 64 pending = true;
76 ath9k_update_ichannel(sc, hw, channel); 65 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
77 return channel; 66 pending = !list_empty(&txq->txq_fifo_pending);
67
68 spin_unlock_bh(&txq->axq_lock);
69 return pending;
78} 70}
79 71
80bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) 72bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
@@ -177,7 +169,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
177 } 169 }
178} 170}
179 171
180static void ath_update_survey_stats(struct ath_softc *sc) 172/*
173 * Updates the survey statistics and returns the busy time since last
174 * update in %, if the measurement duration was long enough for the
175 * result to be useful, -1 otherwise.
176 */
177static int ath_update_survey_stats(struct ath_softc *sc)
181{ 178{
182 struct ath_hw *ah = sc->sc_ah; 179 struct ath_hw *ah = sc->sc_ah;
183 struct ath_common *common = ath9k_hw_common(ah); 180 struct ath_common *common = ath9k_hw_common(ah);
@@ -185,9 +182,10 @@ static void ath_update_survey_stats(struct ath_softc *sc)
185 struct survey_info *survey = &sc->survey[pos]; 182 struct survey_info *survey = &sc->survey[pos];
186 struct ath_cycle_counters *cc = &common->cc_survey; 183 struct ath_cycle_counters *cc = &common->cc_survey;
187 unsigned int div = common->clockrate * 1000; 184 unsigned int div = common->clockrate * 1000;
185 int ret = 0;
188 186
189 if (!ah->curchan) 187 if (!ah->curchan)
190 return; 188 return -1;
191 189
192 if (ah->power_mode == ATH9K_PM_AWAKE) 190 if (ah->power_mode == ATH9K_PM_AWAKE)
193 ath_hw_cycle_counters_update(common); 191 ath_hw_cycle_counters_update(common);
@@ -202,9 +200,18 @@ static void ath_update_survey_stats(struct ath_softc *sc)
202 survey->channel_time_rx += cc->rx_frame / div; 200 survey->channel_time_rx += cc->rx_frame / div;
203 survey->channel_time_tx += cc->tx_frame / div; 201 survey->channel_time_tx += cc->tx_frame / div;
204 } 202 }
203
204 if (cc->cycles < div)
205 return -1;
206
207 if (cc->cycles > 0)
208 ret = cc->rx_busy * 100 / cc->cycles;
209
205 memset(cc, 0, sizeof(*cc)); 210 memset(cc, 0, sizeof(*cc));
206 211
207 ath_update_survey_nf(sc, pos); 212 ath_update_survey_nf(sc, pos);
213
214 return ret;
208} 215}
209 216
 210 /* 217 /*
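ath_update_survey_stats() now returns the rx-busy share of the elapsed cycles, or -1 when fewer than div = clockrate * 1000 cycles (roughly one millisecond of measurement) have accumulated. A standalone sketch of that arithmetic with example numbers:

#include <stdint.h>
#include <stdio.h>

/* Return the rx-busy share of the elapsed cycles in percent, or -1 when
 * fewer than ~1 ms worth of cycles (clockrate in MHz) were accumulated,
 * mirroring 'div = common->clockrate * 1000' above. */
static int busy_percent(uint32_t cycles, uint32_t rx_busy, uint32_t clockrate_mhz)
{
        uint32_t div = clockrate_mhz * 1000;

        if (!cycles || cycles < div)
                return -1;

        return (int)((uint64_t)rx_busy * 100 / cycles);
}

int main(void)
{
        /* 44 MHz clock, 4.4M cycles elapsed (0.1 s), 3.3M of them rx-busy */
        printf("%d%%\n", busy_percent(4400000, 3300000, 44)); /* prints 75% */
        return 0;
}

ath_hw_check() further down feeds this value into its hang heuristic: three consecutive readings of 99% or more trigger ath_reset(), while any lower valid reading clears the counter.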
@@ -215,7 +222,6 @@ static void ath_update_survey_stats(struct ath_softc *sc)
215int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 222int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
216 struct ath9k_channel *hchan) 223 struct ath9k_channel *hchan)
217{ 224{
218 struct ath_wiphy *aphy = hw->priv;
219 struct ath_hw *ah = sc->sc_ah; 225 struct ath_hw *ah = sc->sc_ah;
220 struct ath_common *common = ath9k_hw_common(ah); 226 struct ath_common *common = ath9k_hw_common(ah);
221 struct ieee80211_conf *conf = &common->hw->conf; 227 struct ieee80211_conf *conf = &common->hw->conf;
@@ -227,10 +233,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
227 if (sc->sc_flags & SC_OP_INVALID) 233 if (sc->sc_flags & SC_OP_INVALID)
228 return -EIO; 234 return -EIO;
229 235
236 sc->hw_busy_count = 0;
237
230 del_timer_sync(&common->ani.timer); 238 del_timer_sync(&common->ani.timer);
231 cancel_work_sync(&sc->paprd_work); 239 cancel_work_sync(&sc->paprd_work);
232 cancel_work_sync(&sc->hw_check_work); 240 cancel_work_sync(&sc->hw_check_work);
233 cancel_delayed_work_sync(&sc->tx_complete_work); 241 cancel_delayed_work_sync(&sc->tx_complete_work);
242 cancel_delayed_work_sync(&sc->hw_pll_work);
234 243
235 ath9k_ps_wakeup(sc); 244 ath9k_ps_wakeup(sc);
236 245
@@ -251,6 +260,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
251 if (!ath_stoprecv(sc)) 260 if (!ath_stoprecv(sc))
252 stopped = false; 261 stopped = false;
253 262
263 if (!ath9k_hw_check_alive(ah))
264 stopped = false;
265
254 /* XXX: do not flush receive queue here. We don't want 266 /* XXX: do not flush receive queue here. We don't want
255 * to flush data frames already in queue because of 267 * to flush data frames already in queue because of
256 * changing channel. */ 268 * changing channel. */
@@ -259,7 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
259 fastcc = false; 271 fastcc = false;
260 272
261 if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) 273 if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
262 caldata = &aphy->caldata; 274 caldata = &sc->caldata;
263 275
264 ath_dbg(common, ATH_DBG_CONFIG, 276 ath_dbg(common, ATH_DBG_CONFIG,
265 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n", 277 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
@@ -281,17 +293,21 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
281 goto ps_restore; 293 goto ps_restore;
282 } 294 }
283 295
284 ath_update_txpow(sc); 296 ath9k_cmn_update_txpow(ah, sc->curtxpow,
297 sc->config.txpowlimit, &sc->curtxpow);
285 ath9k_hw_set_interrupts(ah, ah->imask); 298 ath9k_hw_set_interrupts(ah, ah->imask);
286 299
287 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) { 300 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
288 if (sc->sc_flags & SC_OP_BEACONS) 301 if (sc->sc_flags & SC_OP_BEACONS)
289 ath_beacon_config(sc, NULL); 302 ath_beacon_config(sc, NULL);
290 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 303 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
304 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
291 ath_start_ani(common); 305 ath_start_ani(common);
292 } 306 }
293 307
294 ps_restore: 308 ps_restore:
309 ieee80211_wake_queues(hw);
310
295 spin_unlock_bh(&sc->sc_pcu_lock); 311 spin_unlock_bh(&sc->sc_pcu_lock);
296 312
297 ath9k_ps_restore(sc); 313 ath9k_ps_restore(sc);
@@ -549,6 +565,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
549 struct ath_hw *ah = sc->sc_ah; 565 struct ath_hw *ah = sc->sc_ah;
550 an = (struct ath_node *)sta->drv_priv; 566 an = (struct ath_node *)sta->drv_priv;
551 567
568#ifdef CONFIG_ATH9K_DEBUGFS
569 spin_lock(&sc->nodes_lock);
570 list_add(&an->list, &sc->nodes);
571 spin_unlock(&sc->nodes_lock);
572 an->sta = sta;
573#endif
552 if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM) 574 if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
553 sc->sc_flags |= SC_OP_ENABLE_APM; 575 sc->sc_flags |= SC_OP_ENABLE_APM;
554 576
@@ -564,6 +586,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
564{ 586{
565 struct ath_node *an = (struct ath_node *)sta->drv_priv; 587 struct ath_node *an = (struct ath_node *)sta->drv_priv;
566 588
589#ifdef CONFIG_ATH9K_DEBUGFS
590 spin_lock(&sc->nodes_lock);
591 list_del(&an->list);
592 spin_unlock(&sc->nodes_lock);
593 an->sta = NULL;
594#endif
595
567 if (sc->sc_flags & SC_OP_TXAGGR) 596 if (sc->sc_flags & SC_OP_TXAGGR)
568 ath_tx_node_cleanup(sc, an); 597 ath_tx_node_cleanup(sc, an);
569} 598}
@@ -571,17 +600,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
571void ath_hw_check(struct work_struct *work) 600void ath_hw_check(struct work_struct *work)
572{ 601{
573 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work); 602 struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
574 int i; 603 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
604 unsigned long flags;
605 int busy;
575 606
576 ath9k_ps_wakeup(sc); 607 ath9k_ps_wakeup(sc);
608 if (ath9k_hw_check_alive(sc->sc_ah))
609 goto out;
577 610
578 for (i = 0; i < 3; i++) { 611 spin_lock_irqsave(&common->cc_lock, flags);
579 if (ath9k_hw_check_alive(sc->sc_ah)) 612 busy = ath_update_survey_stats(sc);
580 goto out; 613 spin_unlock_irqrestore(&common->cc_lock, flags);
581 614
582 msleep(1); 615 ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
583 } 616 "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
584 ath_reset(sc, true); 617 if (busy >= 99) {
618 if (++sc->hw_busy_count >= 3)
619 ath_reset(sc, true);
620 } else if (busy >= 0)
621 sc->hw_busy_count = 0;
585 622
586out: 623out:
587 ath9k_ps_restore(sc); 624 ath9k_ps_restore(sc);
@@ -604,7 +641,15 @@ void ath9k_tasklet(unsigned long data)
604 ath9k_ps_wakeup(sc); 641 ath9k_ps_wakeup(sc);
605 spin_lock(&sc->sc_pcu_lock); 642 spin_lock(&sc->sc_pcu_lock);
606 643
607 if (!ath9k_hw_check_alive(ah)) 644 /*
645 * Only run the baseband hang check if beacons stop working in AP or
646 * IBSS mode, because it has a high false positive rate. For station
647 * mode it should not be necessary, since the upper layers will detect
648 * this through a beacon miss automatically and the following channel
649 * change will trigger a hardware reset anyway
650 */
651 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
652 !ath9k_hw_check_alive(ah))
608 ieee80211_queue_work(sc->hw, &sc->hw_check_work); 653 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
609 654
610 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 655 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -783,54 +828,11 @@ chip_reset:
783#undef SCHED_INTR 828#undef SCHED_INTR
784} 829}
785 830
786static u32 ath_get_extchanmode(struct ath_softc *sc,
787 struct ieee80211_channel *chan,
788 enum nl80211_channel_type channel_type)
789{
790 u32 chanmode = 0;
791
792 switch (chan->band) {
793 case IEEE80211_BAND_2GHZ:
794 switch(channel_type) {
795 case NL80211_CHAN_NO_HT:
796 case NL80211_CHAN_HT20:
797 chanmode = CHANNEL_G_HT20;
798 break;
799 case NL80211_CHAN_HT40PLUS:
800 chanmode = CHANNEL_G_HT40PLUS;
801 break;
802 case NL80211_CHAN_HT40MINUS:
803 chanmode = CHANNEL_G_HT40MINUS;
804 break;
805 }
806 break;
807 case IEEE80211_BAND_5GHZ:
808 switch(channel_type) {
809 case NL80211_CHAN_NO_HT:
810 case NL80211_CHAN_HT20:
811 chanmode = CHANNEL_A_HT20;
812 break;
813 case NL80211_CHAN_HT40PLUS:
814 chanmode = CHANNEL_A_HT40PLUS;
815 break;
816 case NL80211_CHAN_HT40MINUS:
817 chanmode = CHANNEL_A_HT40MINUS;
818 break;
819 }
820 break;
821 default:
822 break;
823 }
824
825 return chanmode;
826}
827
828static void ath9k_bss_assoc_info(struct ath_softc *sc, 831static void ath9k_bss_assoc_info(struct ath_softc *sc,
829 struct ieee80211_hw *hw, 832 struct ieee80211_hw *hw,
830 struct ieee80211_vif *vif, 833 struct ieee80211_vif *vif,
831 struct ieee80211_bss_conf *bss_conf) 834 struct ieee80211_bss_conf *bss_conf)
832{ 835{
833 struct ath_wiphy *aphy = hw->priv;
834 struct ath_hw *ah = sc->sc_ah; 836 struct ath_hw *ah = sc->sc_ah;
835 struct ath_common *common = ath9k_hw_common(ah); 837 struct ath_common *common = ath9k_hw_common(ah);
836 838
@@ -854,7 +856,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
854 ath_beacon_config(sc, vif); 856 ath_beacon_config(sc, vif);
855 857
856 /* Reset rssi stats */ 858 /* Reset rssi stats */
857 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER; 859 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
858 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 860 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
859 861
860 sc->sc_flags |= SC_OP_ANI_RUN; 862 sc->sc_flags |= SC_OP_ANI_RUN;
@@ -881,7 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
881 ath9k_hw_configpcipowersave(ah, 0, 0); 883 ath9k_hw_configpcipowersave(ah, 0, 0);
882 884
883 if (!ah->curchan) 885 if (!ah->curchan)
884 ah->curchan = ath_get_curchannel(sc, sc->hw); 886 ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
885 887
886 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 888 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
887 if (r) { 889 if (r) {
@@ -890,7 +892,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
890 channel->center_freq, r); 892 channel->center_freq, r);
891 } 893 }
892 894
893 ath_update_txpow(sc); 895 ath9k_cmn_update_txpow(ah, sc->curtxpow,
896 sc->config.txpowlimit, &sc->curtxpow);
894 if (ath_startrecv(sc) != 0) { 897 if (ath_startrecv(sc) != 0) {
895 ath_err(common, "Unable to restart recv logic\n"); 898 ath_err(common, "Unable to restart recv logic\n");
896 goto out; 899 goto out;
@@ -942,7 +945,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
942 ath_flushrecv(sc); /* flush recv queue */ 945 ath_flushrecv(sc); /* flush recv queue */
943 946
944 if (!ah->curchan) 947 if (!ah->curchan)
945 ah->curchan = ath_get_curchannel(sc, hw); 948 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
946 949
947 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 950 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
948 if (r) { 951 if (r) {
@@ -966,6 +969,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
966 struct ieee80211_hw *hw = sc->hw; 969 struct ieee80211_hw *hw = sc->hw;
967 int r; 970 int r;
968 971
972 sc->hw_busy_count = 0;
973
969 /* Stop ANI */ 974 /* Stop ANI */
970 del_timer_sync(&common->ani.timer); 975 del_timer_sync(&common->ani.timer);
971 976
@@ -993,7 +998,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
993 * that changes the channel so update any state that 998 * that changes the channel so update any state that
994 * might change as a result. 999 * might change as a result.
995 */ 1000 */
996 ath_update_txpow(sc); 1001 ath9k_cmn_update_txpow(ah, sc->curtxpow,
1002 sc->config.txpowlimit, &sc->curtxpow);
997 1003
998 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL))) 1004 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
999 ath_beacon_config(sc, NULL); /* restart beacons */ 1005 ath_beacon_config(sc, NULL); /* restart beacons */
@@ -1021,38 +1027,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1021 return r; 1027 return r;
1022} 1028}
1023 1029
1024/* XXX: Remove me once we don't depend on ath9k_channel for all
1025 * this redundant data */
1026void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
1027 struct ath9k_channel *ichan)
1028{
1029 struct ieee80211_channel *chan = hw->conf.channel;
1030 struct ieee80211_conf *conf = &hw->conf;
1031
1032 ichan->channel = chan->center_freq;
1033 ichan->chan = chan;
1034
1035 if (chan->band == IEEE80211_BAND_2GHZ) {
1036 ichan->chanmode = CHANNEL_G;
1037 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
1038 } else {
1039 ichan->chanmode = CHANNEL_A;
1040 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1041 }
1042
1043 if (conf_is_ht(conf))
1044 ichan->chanmode = ath_get_extchanmode(sc, chan,
1045 conf->channel_type);
1046}
1047
1048/**********************/ 1030/**********************/
1049/* mac80211 callbacks */ 1031/* mac80211 callbacks */
1050/**********************/ 1032/**********************/
1051 1033
1052static int ath9k_start(struct ieee80211_hw *hw) 1034static int ath9k_start(struct ieee80211_hw *hw)
1053{ 1035{
1054 struct ath_wiphy *aphy = hw->priv; 1036 struct ath_softc *sc = hw->priv;
1055 struct ath_softc *sc = aphy->sc;
1056 struct ath_hw *ah = sc->sc_ah; 1037 struct ath_hw *ah = sc->sc_ah;
1057 struct ath_common *common = ath9k_hw_common(ah); 1038 struct ath_common *common = ath9k_hw_common(ah);
1058 struct ieee80211_channel *curchan = hw->conf.channel; 1039 struct ieee80211_channel *curchan = hw->conf.channel;
@@ -1065,32 +1046,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
1065 1046
1066 mutex_lock(&sc->mutex); 1047 mutex_lock(&sc->mutex);
1067 1048
1068 if (ath9k_wiphy_started(sc)) {
1069 if (sc->chan_idx == curchan->hw_value) {
1070 /*
1071 * Already on the operational channel, the new wiphy
1072 * can be marked active.
1073 */
1074 aphy->state = ATH_WIPHY_ACTIVE;
1075 ieee80211_wake_queues(hw);
1076 } else {
1077 /*
1078 * Another wiphy is on another channel, start the new
1079 * wiphy in paused state.
1080 */
1081 aphy->state = ATH_WIPHY_PAUSED;
1082 ieee80211_stop_queues(hw);
1083 }
1084 mutex_unlock(&sc->mutex);
1085 return 0;
1086 }
1087 aphy->state = ATH_WIPHY_ACTIVE;
1088
1089 /* setup initial channel */ 1049 /* setup initial channel */
1090
1091 sc->chan_idx = curchan->hw_value; 1050 sc->chan_idx = curchan->hw_value;
1092 1051
1093 init_channel = ath_get_curchannel(sc, hw); 1052 init_channel = ath9k_cmn_get_curchannel(hw, ah);
1094 1053
1095 /* Reset SERDES registers */ 1054 /* Reset SERDES registers */
1096 ath9k_hw_configpcipowersave(ah, 0, 0); 1055 ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -1116,7 +1075,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1116 * This is needed only to setup initial state 1075 * This is needed only to setup initial state
1117 * but it's best done after a reset. 1076 * but it's best done after a reset.
1118 */ 1077 */
1119 ath_update_txpow(sc); 1078 ath9k_cmn_update_txpow(ah, sc->curtxpow,
1079 sc->config.txpowlimit, &sc->curtxpow);
1120 1080
1121 /* 1081 /*
1122 * Setup the hardware after reset: 1082 * Setup the hardware after reset:
@@ -1185,19 +1145,11 @@ mutex_unlock:
1185static int ath9k_tx(struct ieee80211_hw *hw, 1145static int ath9k_tx(struct ieee80211_hw *hw,
1186 struct sk_buff *skb) 1146 struct sk_buff *skb)
1187{ 1147{
1188 struct ath_wiphy *aphy = hw->priv; 1148 struct ath_softc *sc = hw->priv;
1189 struct ath_softc *sc = aphy->sc;
1190 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1149 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1191 struct ath_tx_control txctl; 1150 struct ath_tx_control txctl;
1192 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1151 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1193 1152
1194 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
1195 ath_dbg(common, ATH_DBG_XMIT,
1196 "ath9k: %s: TX in unexpected wiphy state %d\n",
1197 wiphy_name(hw->wiphy), aphy->state);
1198 goto exit;
1199 }
1200
1201 if (sc->ps_enabled) { 1153 if (sc->ps_enabled) {
1202 /* 1154 /*
1203 * mac80211 does not set PM field for normal data frames, so we 1155 * mac80211 does not set PM field for normal data frames, so we
@@ -1256,44 +1208,26 @@ exit:
1256 1208
1257static void ath9k_stop(struct ieee80211_hw *hw) 1209static void ath9k_stop(struct ieee80211_hw *hw)
1258{ 1210{
1259 struct ath_wiphy *aphy = hw->priv; 1211 struct ath_softc *sc = hw->priv;
1260 struct ath_softc *sc = aphy->sc;
1261 struct ath_hw *ah = sc->sc_ah; 1212 struct ath_hw *ah = sc->sc_ah;
1262 struct ath_common *common = ath9k_hw_common(ah); 1213 struct ath_common *common = ath9k_hw_common(ah);
1263 int i;
1264 1214
1265 mutex_lock(&sc->mutex); 1215 mutex_lock(&sc->mutex);
1266 1216
1267 aphy->state = ATH_WIPHY_INACTIVE;
1268
1269 if (led_blink) 1217 if (led_blink)
1270 cancel_delayed_work_sync(&sc->ath_led_blink_work); 1218 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1271 1219
1272 cancel_delayed_work_sync(&sc->tx_complete_work); 1220 cancel_delayed_work_sync(&sc->tx_complete_work);
1221 cancel_delayed_work_sync(&sc->hw_pll_work);
1273 cancel_work_sync(&sc->paprd_work); 1222 cancel_work_sync(&sc->paprd_work);
1274 cancel_work_sync(&sc->hw_check_work); 1223 cancel_work_sync(&sc->hw_check_work);
1275 1224
1276 for (i = 0; i < sc->num_sec_wiphy; i++) {
1277 if (sc->sec_wiphy[i])
1278 break;
1279 }
1280
1281 if (i == sc->num_sec_wiphy) {
1282 cancel_delayed_work_sync(&sc->wiphy_work);
1283 cancel_work_sync(&sc->chan_work);
1284 }
1285
1286 if (sc->sc_flags & SC_OP_INVALID) { 1225 if (sc->sc_flags & SC_OP_INVALID) {
1287 ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); 1226 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
1288 mutex_unlock(&sc->mutex); 1227 mutex_unlock(&sc->mutex);
1289 return; 1228 return;
1290 } 1229 }
1291 1230
1292 if (ath9k_wiphy_started(sc)) {
1293 mutex_unlock(&sc->mutex);
1294 return; /* another wiphy still in use */
1295 }
1296
1297 /* Ensure HW is awake when we try to shut it down. */ 1231 /* Ensure HW is awake when we try to shut it down. */
1298 ath9k_ps_wakeup(sc); 1232 ath9k_ps_wakeup(sc);
1299 1233
@@ -1319,6 +1253,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1319 } else 1253 } else
1320 sc->rx.rxlink = NULL; 1254 sc->rx.rxlink = NULL;
1321 1255
1256 if (sc->rx.frag) {
1257 dev_kfree_skb_any(sc->rx.frag);
1258 sc->rx.frag = NULL;
1259 }
1260
1322 /* disable HAL and put h/w to sleep */ 1261 /* disable HAL and put h/w to sleep */
1323 ath9k_hw_disable(ah); 1262 ath9k_hw_disable(ah);
1324 ath9k_hw_configpcipowersave(ah, 1, 1); 1263 ath9k_hw_configpcipowersave(ah, 1, 1);
@@ -1334,7 +1273,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1334 ath9k_ps_restore(sc); 1273 ath9k_ps_restore(sc);
1335 1274
1336 sc->ps_idle = true; 1275 sc->ps_idle = true;
1337 ath9k_set_wiphy_idle(aphy, true);
1338 ath_radio_disable(sc, hw); 1276 ath_radio_disable(sc, hw);
1339 1277
1340 sc->sc_flags |= SC_OP_INVALID; 1278 sc->sc_flags |= SC_OP_INVALID;
@@ -1344,112 +1282,225 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1344 ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); 1282 ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
1345} 1283}
1346 1284
1347static int ath9k_add_interface(struct ieee80211_hw *hw, 1285bool ath9k_uses_beacons(int type)
1348 struct ieee80211_vif *vif) 1286{
1287 switch (type) {
1288 case NL80211_IFTYPE_AP:
1289 case NL80211_IFTYPE_ADHOC:
1290 case NL80211_IFTYPE_MESH_POINT:
1291 return true;
1292 default:
1293 return false;
1294 }
1295}
1296
1297static void ath9k_reclaim_beacon(struct ath_softc *sc,
1298 struct ieee80211_vif *vif)
1349{ 1299{
1350 struct ath_wiphy *aphy = hw->priv;
1351 struct ath_softc *sc = aphy->sc;
1352 struct ath_hw *ah = sc->sc_ah;
1353 struct ath_common *common = ath9k_hw_common(ah);
1354 struct ath_vif *avp = (void *)vif->drv_priv; 1300 struct ath_vif *avp = (void *)vif->drv_priv;
1355 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
1356 int ret = 0;
1357 1301
1358 mutex_lock(&sc->mutex); 1302 ath9k_set_beaconing_status(sc, false);
1303 ath_beacon_return(sc, avp);
1304 ath9k_set_beaconing_status(sc, true);
1305 sc->sc_flags &= ~SC_OP_BEACONS;
1306}
1307
1308static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1309{
1310 struct ath9k_vif_iter_data *iter_data = data;
1311 int i;
1312
1313 if (iter_data->hw_macaddr)
1314 for (i = 0; i < ETH_ALEN; i++)
1315 iter_data->mask[i] &=
1316 ~(iter_data->hw_macaddr[i] ^ mac[i]);
1359 1317
1360 switch (vif->type) { 1318 switch (vif->type) {
1361 case NL80211_IFTYPE_STATION: 1319 case NL80211_IFTYPE_AP:
1362 ic_opmode = NL80211_IFTYPE_STATION; 1320 iter_data->naps++;
1363 break; 1321 break;
1364 case NL80211_IFTYPE_WDS: 1322 case NL80211_IFTYPE_STATION:
1365 ic_opmode = NL80211_IFTYPE_WDS; 1323 iter_data->nstations++;
1366 break; 1324 break;
1367 case NL80211_IFTYPE_ADHOC: 1325 case NL80211_IFTYPE_ADHOC:
1368 case NL80211_IFTYPE_AP: 1326 iter_data->nadhocs++;
1327 break;
1369 case NL80211_IFTYPE_MESH_POINT: 1328 case NL80211_IFTYPE_MESH_POINT:
1370 if (sc->nbcnvifs >= ATH_BCBUF) { 1329 iter_data->nmeshes++;
1371 ret = -ENOBUFS; 1330 break;
1372 goto out; 1331 case NL80211_IFTYPE_WDS:
1373 } 1332 iter_data->nwds++;
1374 ic_opmode = vif->type;
1375 break; 1333 break;
1376 default: 1334 default:
1377 ath_err(common, "Interface type %d not yet supported\n", 1335 iter_data->nothers++;
1378 vif->type); 1336 break;
1379 ret = -EOPNOTSUPP;
1380 goto out;
1381 } 1337 }
1338}
1382 1339
1383 ath_dbg(common, ATH_DBG_CONFIG, 1340/* Called with sc->mutex held. */
1384 "Attach a VIF of type: %d\n", ic_opmode); 1341void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
1342 struct ieee80211_vif *vif,
1343 struct ath9k_vif_iter_data *iter_data)
1344{
1345 struct ath_softc *sc = hw->priv;
1346 struct ath_hw *ah = sc->sc_ah;
1347 struct ath_common *common = ath9k_hw_common(ah);
1385 1348
1386 /* Set the VIF opmode */ 1349 /*
1387 avp->av_opmode = ic_opmode; 1350 * Use the hardware MAC address as reference, the hardware uses it
1388 avp->av_bslot = -1; 1351 * together with the BSSID mask when matching addresses.
1352 */
1353 memset(iter_data, 0, sizeof(*iter_data));
1354 iter_data->hw_macaddr = common->macaddr;
1355 memset(&iter_data->mask, 0xff, ETH_ALEN);
1389 1356
1390 sc->nvifs++; 1357 if (vif)
1358 ath9k_vif_iter(iter_data, vif->addr, vif);
1391 1359
1392 ath9k_set_bssid_mask(hw, vif); 1360 /* Get list of all active MAC addresses */
1361 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
1362 iter_data);
1363}
1364
1365/* Called with sc->mutex held. */
1366static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1367 struct ieee80211_vif *vif)
1368{
1369 struct ath_softc *sc = hw->priv;
1370 struct ath_hw *ah = sc->sc_ah;
1371 struct ath_common *common = ath9k_hw_common(ah);
1372 struct ath9k_vif_iter_data iter_data;
1393 1373
1394 if (sc->nvifs > 1) 1374 ath9k_calculate_iter_data(hw, vif, &iter_data);
1395 goto out; /* skip global settings for secondary vif */
1396 1375
1397 if (ic_opmode == NL80211_IFTYPE_AP) { 1376 ath9k_ps_wakeup(sc);
1377 /* Set BSSID mask. */
1378 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
1379 ath_hw_setbssidmask(common);
1380
1381 /* Set op-mode & TSF */
1382 if (iter_data.naps > 0) {
1398 ath9k_hw_set_tsfadjust(ah, 1); 1383 ath9k_hw_set_tsfadjust(ah, 1);
1399 sc->sc_flags |= SC_OP_TSF_RESET; 1384 sc->sc_flags |= SC_OP_TSF_RESET;
1400 } 1385 ah->opmode = NL80211_IFTYPE_AP;
1386 } else {
1387 ath9k_hw_set_tsfadjust(ah, 0);
1388 sc->sc_flags &= ~SC_OP_TSF_RESET;
1401 1389
1402 /* Set the device opmode */ 1390 if (iter_data.nwds + iter_data.nmeshes)
1403 ah->opmode = ic_opmode; 1391 ah->opmode = NL80211_IFTYPE_AP;
1392 else if (iter_data.nadhocs)
1393 ah->opmode = NL80211_IFTYPE_ADHOC;
1394 else
1395 ah->opmode = NL80211_IFTYPE_STATION;
1396 }
1404 1397
1405 /* 1398 /*
1406 * Enable MIB interrupts when there are hardware phy counters. 1399 * Enable MIB interrupts when there are hardware phy counters.
1407 * Note we only do this (at the moment) for station mode.
1408 */ 1400 */
1409 if ((vif->type == NL80211_IFTYPE_STATION) || 1401 if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
1410 (vif->type == NL80211_IFTYPE_ADHOC) ||
1411 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
1412 if (ah->config.enable_ani) 1402 if (ah->config.enable_ani)
1413 ah->imask |= ATH9K_INT_MIB; 1403 ah->imask |= ATH9K_INT_MIB;
1414 ah->imask |= ATH9K_INT_TSFOOR; 1404 ah->imask |= ATH9K_INT_TSFOOR;
1405 } else {
1406 ah->imask &= ~ATH9K_INT_MIB;
1407 ah->imask &= ~ATH9K_INT_TSFOOR;
1415 } 1408 }
1416 1409
1417 ath9k_hw_set_interrupts(ah, ah->imask); 1410 ath9k_hw_set_interrupts(ah, ah->imask);
1411 ath9k_ps_restore(sc);
1418 1412
1419 if (vif->type == NL80211_IFTYPE_AP || 1413 /* Set up ANI */
1420 vif->type == NL80211_IFTYPE_ADHOC) { 1414 if ((iter_data.naps + iter_data.nadhocs) > 0) {
1421 sc->sc_flags |= SC_OP_ANI_RUN; 1415 sc->sc_flags |= SC_OP_ANI_RUN;
1422 ath_start_ani(common); 1416 ath_start_ani(common);
1417 } else {
1418 sc->sc_flags &= ~SC_OP_ANI_RUN;
1419 del_timer_sync(&common->ani.timer);
1423 } 1420 }
1421}
1424 1422
1425out: 1423/* Called with sc->mutex held, vif counts set up properly. */
1426 mutex_unlock(&sc->mutex); 1424static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
1427 return ret; 1425 struct ieee80211_vif *vif)
1426{
1427 struct ath_softc *sc = hw->priv;
1428
1429 ath9k_calculate_summary_state(hw, vif);
1430
1431 if (ath9k_uses_beacons(vif->type)) {
1432 int error;
1433 /* This may fail because upper levels do not have beacons
1434 * properly configured yet. That's OK, we assume it
1435 * will be properly configured and then we will be notified
1436 * in the info_changed method and set up beacons properly
1437 * there.
1438 */
1439 ath9k_set_beaconing_status(sc, false);
1440 error = ath_beacon_alloc(sc, vif);
1441 if (!error)
1442 ath_beacon_config(sc, vif);
1443 ath9k_set_beaconing_status(sc, true);
1444 }
1428} 1445}
1429 1446
1430static void ath9k_reclaim_beacon(struct ath_softc *sc, 1447
1431 struct ieee80211_vif *vif) 1448static int ath9k_add_interface(struct ieee80211_hw *hw,
1449 struct ieee80211_vif *vif)
1432{ 1450{
1451 struct ath_softc *sc = hw->priv;
1452 struct ath_hw *ah = sc->sc_ah;
1453 struct ath_common *common = ath9k_hw_common(ah);
1433 struct ath_vif *avp = (void *)vif->drv_priv; 1454 struct ath_vif *avp = (void *)vif->drv_priv;
1455 int ret = 0;
1434 1456
1435 /* Disable SWBA interrupt */ 1457 mutex_lock(&sc->mutex);
1436 sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
1437 ath9k_ps_wakeup(sc);
1438 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1439 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1440 tasklet_kill(&sc->bcon_tasklet);
1441 ath9k_ps_restore(sc);
1442 1458
1443 ath_beacon_return(sc, avp); 1459 switch (vif->type) {
1444 sc->sc_flags &= ~SC_OP_BEACONS; 1460 case NL80211_IFTYPE_STATION:
1461 case NL80211_IFTYPE_WDS:
1462 case NL80211_IFTYPE_ADHOC:
1463 case NL80211_IFTYPE_AP:
1464 case NL80211_IFTYPE_MESH_POINT:
1465 break;
1466 default:
1467 ath_err(common, "Interface type %d not yet supported\n",
1468 vif->type);
1469 ret = -EOPNOTSUPP;
1470 goto out;
1471 }
1445 1472
1446 if (sc->nbcnvifs > 0) { 1473 if (ath9k_uses_beacons(vif->type)) {
1447 /* Re-enable beaconing */ 1474 if (sc->nbcnvifs >= ATH_BCBUF) {
1448 sc->sc_ah->imask |= ATH9K_INT_SWBA; 1475 ath_err(common, "Not enough beacon buffers when adding"
1449 ath9k_ps_wakeup(sc); 1476 " new interface of type: %i\n",
1450 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); 1477 vif->type);
1451 ath9k_ps_restore(sc); 1478 ret = -ENOBUFS;
1479 goto out;
1480 }
1452 } 1481 }
1482
1483 if ((vif->type == NL80211_IFTYPE_ADHOC) &&
1484 sc->nvifs > 0) {
1485 ath_err(common, "Cannot create ADHOC interface when other"
1486 " interfaces already exist.\n");
1487 ret = -EINVAL;
1488 goto out;
1489 }
1490
1491 ath_dbg(common, ATH_DBG_CONFIG,
1492 "Attach a VIF of type: %d\n", vif->type);
1493
1494 /* Set the VIF opmode */
1495 avp->av_opmode = vif->type;
1496 avp->av_bslot = -1;
1497
1498 sc->nvifs++;
1499
1500 ath9k_do_vif_add_setup(hw, vif);
1501out:
1502 mutex_unlock(&sc->mutex);
1503 return ret;
1453} 1504}
1454 1505
1455static int ath9k_change_interface(struct ieee80211_hw *hw, 1506static int ath9k_change_interface(struct ieee80211_hw *hw,
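[Illustrative aside] The BSSID-mask computation in ath9k_vif_iter()/ath9k_calculate_iter_data() above clears every mask bit in which some active interface address differs from the hardware MAC, so the hardware only has to match the bits that all addresses share. A minimal, self-contained userspace sketch of that bit logic follows; it is plain C with made-up example addresses, not driver code.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Clear mask bits wherever an interface address differs from the hw MAC. */
static void update_bssid_mask(const unsigned char *hw_mac,
                              const unsigned char *vif_mac,
                              unsigned char *mask)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] &= ~(hw_mac[i] ^ vif_mac[i]);
}

int main(void)
{
	/* Hypothetical addresses, for illustration only. */
	const unsigned char hw_mac[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x01, 0x02, 0x03 };
	const unsigned char vif1[ETH_ALEN]   = { 0x00, 0x03, 0x7f, 0x01, 0x02, 0x07 };
	const unsigned char vif2[ETH_ALEN]   = { 0x00, 0x03, 0x7f, 0x01, 0x02, 0x0b };
	unsigned char mask[ETH_ALEN];
	int i;

	memset(mask, 0xff, ETH_ALEN);	/* start with "every bit must match" */
	update_bssid_mask(hw_mac, vif1, mask);
	update_bssid_mask(hw_mac, vif2, mask);

	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", mask[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}

The same iterator also counts interfaces per type (naps, nstations, nadhocs, ...), which the summary-state code uses below.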
@@ -1457,40 +1508,40 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1457 enum nl80211_iftype new_type, 1508 enum nl80211_iftype new_type,
1458 bool p2p) 1509 bool p2p)
1459{ 1510{
1460 struct ath_wiphy *aphy = hw->priv; 1511 struct ath_softc *sc = hw->priv;
1461 struct ath_softc *sc = aphy->sc;
1462 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1512 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1463 int ret = 0; 1513 int ret = 0;
1464 1514
1465 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); 1515 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
1466 mutex_lock(&sc->mutex); 1516 mutex_lock(&sc->mutex);
1467 1517
1468 switch (new_type) { 1518 /* See if new interface type is valid. */
1469 case NL80211_IFTYPE_AP: 1519 if ((new_type == NL80211_IFTYPE_ADHOC) &&
1470 case NL80211_IFTYPE_ADHOC: 1520 (sc->nvifs > 1)) {
1521 ath_err(common, "When using ADHOC, it must be the only"
1522 " interface.\n");
1523 ret = -EINVAL;
1524 goto out;
1525 }
1526
1527 if (ath9k_uses_beacons(new_type) &&
1528 !ath9k_uses_beacons(vif->type)) {
1471 if (sc->nbcnvifs >= ATH_BCBUF) { 1529 if (sc->nbcnvifs >= ATH_BCBUF) {
1472 ath_err(common, "No beacon slot available\n"); 1530 ath_err(common, "No beacon slot available\n");
1473 ret = -ENOBUFS; 1531 ret = -ENOBUFS;
1474 goto out; 1532 goto out;
1475 } 1533 }
1476 break;
1477 case NL80211_IFTYPE_STATION:
1478 /* Stop ANI */
1479 sc->sc_flags &= ~SC_OP_ANI_RUN;
1480 del_timer_sync(&common->ani.timer);
1481 if ((vif->type == NL80211_IFTYPE_AP) ||
1482 (vif->type == NL80211_IFTYPE_ADHOC))
1483 ath9k_reclaim_beacon(sc, vif);
1484 break;
1485 default:
1486 ath_err(common, "Interface type %d not yet supported\n",
1487 vif->type);
1488 ret = -ENOTSUPP;
1489 goto out;
1490 } 1534 }
1535
1536 /* Clean up old vif stuff */
1537 if (ath9k_uses_beacons(vif->type))
1538 ath9k_reclaim_beacon(sc, vif);
1539
1540 /* Add new settings */
1491 vif->type = new_type; 1541 vif->type = new_type;
1492 vif->p2p = p2p; 1542 vif->p2p = p2p;
1493 1543
1544 ath9k_do_vif_add_setup(hw, vif);
1494out: 1545out:
1495 mutex_unlock(&sc->mutex); 1546 mutex_unlock(&sc->mutex);
1496 return ret; 1547 return ret;
@@ -1499,25 +1550,20 @@ out:
1499static void ath9k_remove_interface(struct ieee80211_hw *hw, 1550static void ath9k_remove_interface(struct ieee80211_hw *hw,
1500 struct ieee80211_vif *vif) 1551 struct ieee80211_vif *vif)
1501{ 1552{
1502 struct ath_wiphy *aphy = hw->priv; 1553 struct ath_softc *sc = hw->priv;
1503 struct ath_softc *sc = aphy->sc;
1504 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1554 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1505 1555
1506 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1556 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
1507 1557
1508 mutex_lock(&sc->mutex); 1558 mutex_lock(&sc->mutex);
1509 1559
1510 /* Stop ANI */ 1560 sc->nvifs--;
1511 sc->sc_flags &= ~SC_OP_ANI_RUN;
1512 del_timer_sync(&common->ani.timer);
1513 1561
1514 /* Reclaim beacon resources */ 1562 /* Reclaim beacon resources */
1515 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 1563 if (ath9k_uses_beacons(vif->type))
1516 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
1517 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
1518 ath9k_reclaim_beacon(sc, vif); 1564 ath9k_reclaim_beacon(sc, vif);
1519 1565
1520 sc->nvifs--; 1566 ath9k_calculate_summary_state(hw, NULL);
1521 1567
1522 mutex_unlock(&sc->mutex); 1568 mutex_unlock(&sc->mutex);
1523} 1569}
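[Illustrative aside] ath9k_calculate_summary_state(), called from both the add and remove paths above, collapses the per-type interface counts into a single hardware opmode. The selection order, reduced to a small standalone function (names and the enum are illustrative, not driver API):

#include <stdio.h>

enum opmode { OPMODE_STATION, OPMODE_ADHOC, OPMODE_AP };

struct vif_counts {
	int naps, nstations, nadhocs, nmeshes, nwds;
};

/* Mirror of the opmode decision: AP beaconing wins, then WDS/mesh
 * (also run as AP opmode), then IBSS, otherwise plain station. */
static enum opmode pick_opmode(const struct vif_counts *c)
{
	if (c->naps > 0)
		return OPMODE_AP;
	if (c->nwds + c->nmeshes > 0)
		return OPMODE_AP;
	if (c->nadhocs > 0)
		return OPMODE_ADHOC;
	return OPMODE_STATION;
}

int main(void)
{
	struct vif_counts c = { .nstations = 1, .nmeshes = 1 };

	printf("opmode = %d\n", pick_opmode(&c));
	return 0;
}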
@@ -1558,12 +1604,11 @@ static void ath9k_disable_ps(struct ath_softc *sc)
1558 1604
1559static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1605static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1560{ 1606{
1561 struct ath_wiphy *aphy = hw->priv; 1607 struct ath_softc *sc = hw->priv;
1562 struct ath_softc *sc = aphy->sc;
1563 struct ath_hw *ah = sc->sc_ah; 1608 struct ath_hw *ah = sc->sc_ah;
1564 struct ath_common *common = ath9k_hw_common(ah); 1609 struct ath_common *common = ath9k_hw_common(ah);
1565 struct ieee80211_conf *conf = &hw->conf; 1610 struct ieee80211_conf *conf = &hw->conf;
1566 bool disable_radio; 1611 bool disable_radio = false;
1567 1612
1568 mutex_lock(&sc->mutex); 1613 mutex_lock(&sc->mutex);
1569 1614
@@ -1574,29 +1619,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1574 * the end. 1619 * the end.
1575 */ 1620 */
1576 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1621 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1577 bool enable_radio; 1622 sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1578 bool all_wiphys_idle; 1623 if (!sc->ps_idle) {
1579 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
1580
1581 spin_lock_bh(&sc->wiphy_lock);
1582 all_wiphys_idle = ath9k_all_wiphys_idle(sc);
1583 ath9k_set_wiphy_idle(aphy, idle);
1584
1585 enable_radio = (!idle && all_wiphys_idle);
1586
1587 /*
1588 * After we unlock here its possible another wiphy
1589 * can be re-renabled so to account for that we will
1590 * only disable the radio toward the end of this routine
1591 * if by then all wiphys are still idle.
1592 */
1593 spin_unlock_bh(&sc->wiphy_lock);
1594
1595 if (enable_radio) {
1596 sc->ps_idle = false;
1597 ath_radio_enable(sc, hw); 1624 ath_radio_enable(sc, hw);
1598 ath_dbg(common, ATH_DBG_CONFIG, 1625 ath_dbg(common, ATH_DBG_CONFIG,
1599 "not-idle: enabling radio\n"); 1626 "not-idle: enabling radio\n");
1627 } else {
1628 disable_radio = true;
1600 } 1629 }
1601 } 1630 }
1602 1631
@@ -1637,29 +1666,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1637 if (ah->curchan) 1666 if (ah->curchan)
1638 old_pos = ah->curchan - &ah->channels[0]; 1667 old_pos = ah->curchan - &ah->channels[0];
1639 1668
1640 aphy->chan_idx = pos;
1641 aphy->chan_is_ht = conf_is_ht(conf);
1642 if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) 1669 if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
1643 sc->sc_flags |= SC_OP_OFFCHANNEL; 1670 sc->sc_flags |= SC_OP_OFFCHANNEL;
1644 else 1671 else
1645 sc->sc_flags &= ~SC_OP_OFFCHANNEL; 1672 sc->sc_flags &= ~SC_OP_OFFCHANNEL;
1646 1673
1647 if (aphy->state == ATH_WIPHY_SCAN || 1674 ath_dbg(common, ATH_DBG_CONFIG,
1648 aphy->state == ATH_WIPHY_ACTIVE) 1675 "Set channel: %d MHz type: %d\n",
1649 ath9k_wiphy_pause_all_forced(sc, aphy); 1676 curchan->center_freq, conf->channel_type);
1650 else {
1651 /*
1652 * Do not change operational channel based on a paused
1653 * wiphy changes.
1654 */
1655 goto skip_chan_change;
1656 }
1657
1658 ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1659 curchan->center_freq);
1660 1677
1661 /* XXX: remove me eventualy */ 1678 ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
1662 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]); 1679 curchan, conf->channel_type);
1663 1680
1664 /* update survey stats for the old channel before switching */ 1681 /* update survey stats for the old channel before switching */
1665 spin_lock_irqsave(&common->cc_lock, flags); 1682 spin_lock_irqsave(&common->cc_lock, flags);
@@ -1701,21 +1718,18 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1701 ath_update_survey_nf(sc, old_pos); 1718 ath_update_survey_nf(sc, old_pos);
1702 } 1719 }
1703 1720
1704skip_chan_change:
1705 if (changed & IEEE80211_CONF_CHANGE_POWER) { 1721 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1722 ath_dbg(common, ATH_DBG_CONFIG,
1723 "Set power: %d\n", conf->power_level);
1706 sc->config.txpowlimit = 2 * conf->power_level; 1724 sc->config.txpowlimit = 2 * conf->power_level;
1707 ath9k_ps_wakeup(sc); 1725 ath9k_ps_wakeup(sc);
1708 ath_update_txpow(sc); 1726 ath9k_cmn_update_txpow(ah, sc->curtxpow,
1727 sc->config.txpowlimit, &sc->curtxpow);
1709 ath9k_ps_restore(sc); 1728 ath9k_ps_restore(sc);
1710 } 1729 }
1711 1730
1712 spin_lock_bh(&sc->wiphy_lock);
1713 disable_radio = ath9k_all_wiphys_idle(sc);
1714 spin_unlock_bh(&sc->wiphy_lock);
1715
1716 if (disable_radio) { 1731 if (disable_radio) {
1717 ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); 1732 ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
1718 sc->ps_idle = true;
1719 ath_radio_disable(sc, hw); 1733 ath_radio_disable(sc, hw);
1720 } 1734 }
1721 1735
@@ -1740,8 +1754,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
1740 unsigned int *total_flags, 1754 unsigned int *total_flags,
1741 u64 multicast) 1755 u64 multicast)
1742{ 1756{
1743 struct ath_wiphy *aphy = hw->priv; 1757 struct ath_softc *sc = hw->priv;
1744 struct ath_softc *sc = aphy->sc;
1745 u32 rfilt; 1758 u32 rfilt;
1746 1759
1747 changed_flags &= SUPPORTED_FILTERS; 1760 changed_flags &= SUPPORTED_FILTERS;
@@ -1761,8 +1774,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1761 struct ieee80211_vif *vif, 1774 struct ieee80211_vif *vif,
1762 struct ieee80211_sta *sta) 1775 struct ieee80211_sta *sta)
1763{ 1776{
1764 struct ath_wiphy *aphy = hw->priv; 1777 struct ath_softc *sc = hw->priv;
1765 struct ath_softc *sc = aphy->sc;
1766 1778
1767 ath_node_attach(sc, sta); 1779 ath_node_attach(sc, sta);
1768 1780
@@ -1773,8 +1785,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
1773 struct ieee80211_vif *vif, 1785 struct ieee80211_vif *vif,
1774 struct ieee80211_sta *sta) 1786 struct ieee80211_sta *sta)
1775{ 1787{
1776 struct ath_wiphy *aphy = hw->priv; 1788 struct ath_softc *sc = hw->priv;
1777 struct ath_softc *sc = aphy->sc;
1778 1789
1779 ath_node_detach(sc, sta); 1790 ath_node_detach(sc, sta);
1780 1791
@@ -1784,8 +1795,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
1784static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, 1795static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1785 const struct ieee80211_tx_queue_params *params) 1796 const struct ieee80211_tx_queue_params *params)
1786{ 1797{
1787 struct ath_wiphy *aphy = hw->priv; 1798 struct ath_softc *sc = hw->priv;
1788 struct ath_softc *sc = aphy->sc;
1789 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1799 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1790 struct ath_txq *txq; 1800 struct ath_txq *txq;
1791 struct ath9k_tx_queue_info qi; 1801 struct ath9k_tx_queue_info qi;
@@ -1829,8 +1839,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1829 struct ieee80211_sta *sta, 1839 struct ieee80211_sta *sta,
1830 struct ieee80211_key_conf *key) 1840 struct ieee80211_key_conf *key)
1831{ 1841{
1832 struct ath_wiphy *aphy = hw->priv; 1842 struct ath_softc *sc = hw->priv;
1833 struct ath_softc *sc = aphy->sc;
1834 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1843 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1835 int ret = 0; 1844 int ret = 0;
1836 1845
@@ -1874,8 +1883,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1874 struct ieee80211_bss_conf *bss_conf, 1883 struct ieee80211_bss_conf *bss_conf,
1875 u32 changed) 1884 u32 changed)
1876{ 1885{
1877 struct ath_wiphy *aphy = hw->priv; 1886 struct ath_softc *sc = hw->priv;
1878 struct ath_softc *sc = aphy->sc; 1887 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
1879 struct ath_hw *ah = sc->sc_ah; 1888 struct ath_hw *ah = sc->sc_ah;
1880 struct ath_common *common = ath9k_hw_common(ah); 1889 struct ath_common *common = ath9k_hw_common(ah);
1881 struct ath_vif *avp = (void *)vif->drv_priv; 1890 struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1904,10 +1913,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1904 /* Enable transmission of beacons (AP, IBSS, MESH) */ 1913 /* Enable transmission of beacons (AP, IBSS, MESH) */
1905 if ((changed & BSS_CHANGED_BEACON) || 1914 if ((changed & BSS_CHANGED_BEACON) ||
1906 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) { 1915 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
1907 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1916 ath9k_set_beaconing_status(sc, false);
1908 error = ath_beacon_alloc(aphy, vif); 1917 error = ath_beacon_alloc(sc, vif);
1909 if (!error) 1918 if (!error)
1910 ath_beacon_config(sc, vif); 1919 ath_beacon_config(sc, vif);
1920 ath9k_set_beaconing_status(sc, true);
1911 } 1921 }
1912 1922
1913 if (changed & BSS_CHANGED_ERP_SLOT) { 1923 if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1930,21 +1940,26 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1930 } 1940 }
1931 1941
1932 /* Disable transmission of beacons */ 1942 /* Disable transmission of beacons */
1933 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) 1943 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1934 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1944 !bss_conf->enable_beacon) {
1945 ath9k_set_beaconing_status(sc, false);
1946 avp->is_bslot_active = false;
1947 ath9k_set_beaconing_status(sc, true);
1948 }
1935 1949
1936 if (changed & BSS_CHANGED_BEACON_INT) { 1950 if (changed & BSS_CHANGED_BEACON_INT) {
1937 sc->beacon_interval = bss_conf->beacon_int; 1951 cur_conf->beacon_interval = bss_conf->beacon_int;
1938 /* 1952 /*
1939 * In case of AP mode, the HW TSF has to be reset 1953 * In case of AP mode, the HW TSF has to be reset
1940 * when the beacon interval changes. 1954 * when the beacon interval changes.
1941 */ 1955 */
1942 if (vif->type == NL80211_IFTYPE_AP) { 1956 if (vif->type == NL80211_IFTYPE_AP) {
1943 sc->sc_flags |= SC_OP_TSF_RESET; 1957 sc->sc_flags |= SC_OP_TSF_RESET;
1944 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 1958 ath9k_set_beaconing_status(sc, false);
1945 error = ath_beacon_alloc(aphy, vif); 1959 error = ath_beacon_alloc(sc, vif);
1946 if (!error) 1960 if (!error)
1947 ath_beacon_config(sc, vif); 1961 ath_beacon_config(sc, vif);
1962 ath9k_set_beaconing_status(sc, true);
1948 } else { 1963 } else {
1949 ath_beacon_config(sc, vif); 1964 ath_beacon_config(sc, vif);
1950 } 1965 }
@@ -1980,9 +1995,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1980 1995
1981static u64 ath9k_get_tsf(struct ieee80211_hw *hw) 1996static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
1982{ 1997{
1998 struct ath_softc *sc = hw->priv;
1983 u64 tsf; 1999 u64 tsf;
1984 struct ath_wiphy *aphy = hw->priv;
1985 struct ath_softc *sc = aphy->sc;
1986 2000
1987 mutex_lock(&sc->mutex); 2001 mutex_lock(&sc->mutex);
1988 ath9k_ps_wakeup(sc); 2002 ath9k_ps_wakeup(sc);
@@ -1995,8 +2009,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
1995 2009
1996static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf) 2010static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
1997{ 2011{
1998 struct ath_wiphy *aphy = hw->priv; 2012 struct ath_softc *sc = hw->priv;
1999 struct ath_softc *sc = aphy->sc;
2000 2013
2001 mutex_lock(&sc->mutex); 2014 mutex_lock(&sc->mutex);
2002 ath9k_ps_wakeup(sc); 2015 ath9k_ps_wakeup(sc);
@@ -2007,8 +2020,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2007 2020
2008static void ath9k_reset_tsf(struct ieee80211_hw *hw) 2021static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2009{ 2022{
2010 struct ath_wiphy *aphy = hw->priv; 2023 struct ath_softc *sc = hw->priv;
2011 struct ath_softc *sc = aphy->sc;
2012 2024
2013 mutex_lock(&sc->mutex); 2025 mutex_lock(&sc->mutex);
2014 2026
@@ -2023,10 +2035,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2023 struct ieee80211_vif *vif, 2035 struct ieee80211_vif *vif,
2024 enum ieee80211_ampdu_mlme_action action, 2036 enum ieee80211_ampdu_mlme_action action,
2025 struct ieee80211_sta *sta, 2037 struct ieee80211_sta *sta,
2026 u16 tid, u16 *ssn) 2038 u16 tid, u16 *ssn, u8 buf_size)
2027{ 2039{
2028 struct ath_wiphy *aphy = hw->priv; 2040 struct ath_softc *sc = hw->priv;
2029 struct ath_softc *sc = aphy->sc;
2030 int ret = 0; 2041 int ret = 0;
2031 2042
2032 local_bh_disable(); 2043 local_bh_disable();
@@ -2071,8 +2082,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2071static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, 2082static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
2072 struct survey_info *survey) 2083 struct survey_info *survey)
2073{ 2084{
2074 struct ath_wiphy *aphy = hw->priv; 2085 struct ath_softc *sc = hw->priv;
2075 struct ath_softc *sc = aphy->sc;
2076 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2086 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2077 struct ieee80211_supported_band *sband; 2087 struct ieee80211_supported_band *sband;
2078 struct ieee80211_channel *chan; 2088 struct ieee80211_channel *chan;
@@ -2106,52 +2116,68 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
2106 return 0; 2116 return 0;
2107} 2117}
2108 2118
2109static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2119static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2110{ 2120{
2111 struct ath_wiphy *aphy = hw->priv; 2121 struct ath_softc *sc = hw->priv;
2112 struct ath_softc *sc = aphy->sc; 2122 struct ath_hw *ah = sc->sc_ah;
2113 2123
2114 mutex_lock(&sc->mutex); 2124 mutex_lock(&sc->mutex);
2115 if (ath9k_wiphy_scanning(sc)) { 2125 ah->coverage_class = coverage_class;
2116 /* 2126 ath9k_hw_init_global_settings(ah);
2117 * There is a race here in mac80211 but fixing it requires
2118 * we revisit how we handle the scan complete callback.
2119 * After mac80211 fixes we will not have configured hardware
2120 * to the home channel nor would we have configured the RX
2121 * filter yet.
2122 */
2123 mutex_unlock(&sc->mutex);
2124 return;
2125 }
2126
2127 aphy->state = ATH_WIPHY_SCAN;
2128 ath9k_wiphy_pause_all_forced(sc, aphy);
2129 mutex_unlock(&sc->mutex); 2127 mutex_unlock(&sc->mutex);
2130} 2128}
2131 2129
2132/* 2130static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2133 * XXX: this requires a revisit after the driver
2134 * scan_complete gets moved to another place/removed in mac80211.
2135 */
2136static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2137{ 2131{
2138 struct ath_wiphy *aphy = hw->priv; 2132#define ATH_FLUSH_TIMEOUT 60 /* ms */
2139 struct ath_softc *sc = aphy->sc; 2133 struct ath_softc *sc = hw->priv;
2134 struct ath_txq *txq;
2135 struct ath_hw *ah = sc->sc_ah;
2136 struct ath_common *common = ath9k_hw_common(ah);
2137 int i, j, npend = 0;
2140 2138
2141 mutex_lock(&sc->mutex); 2139 mutex_lock(&sc->mutex);
2142 aphy->state = ATH_WIPHY_ACTIVE;
2143 mutex_unlock(&sc->mutex);
2144}
2145 2140
2146static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) 2141 cancel_delayed_work_sync(&sc->tx_complete_work);
2147{
2148 struct ath_wiphy *aphy = hw->priv;
2149 struct ath_softc *sc = aphy->sc;
2150 struct ath_hw *ah = sc->sc_ah;
2151 2142
2152 mutex_lock(&sc->mutex); 2143 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2153 ah->coverage_class = coverage_class; 2144 if (!ATH_TXQ_SETUP(sc, i))
2154 ath9k_hw_init_global_settings(ah); 2145 continue;
2146 txq = &sc->tx.txq[i];
2147
2148 if (!drop) {
2149 for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) {
2150 if (!ath9k_has_pending_frames(sc, txq))
2151 break;
2152 usleep_range(1000, 2000);
2153 }
2154 }
2155
2156 if (drop || ath9k_has_pending_frames(sc, txq)) {
2157 ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n",
2158 txq->axq_qnum);
2159 spin_lock_bh(&txq->axq_lock);
2160 txq->txq_flush_inprogress = true;
2161 spin_unlock_bh(&txq->axq_lock);
2162
2163 ath9k_ps_wakeup(sc);
2164 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
2165 npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
2166 ath9k_ps_restore(sc);
2167 if (npend)
2168 break;
2169
2170 ath_draintxq(sc, txq, false);
2171 txq->txq_flush_inprogress = false;
2172 }
2173 }
2174
2175 if (npend) {
2176 ath_reset(sc, false);
2177 txq->txq_flush_inprogress = false;
2178 }
2179
2180 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
2155 mutex_unlock(&sc->mutex); 2181 mutex_unlock(&sc->mutex);
2156} 2182}
2157 2183
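[Illustrative aside] The new ath9k_flush() above polls each TX queue for up to ATH_FLUSH_TIMEOUT milliseconds before it stops DMA and drops whatever is left. The bounded-poll pattern, reduced to a self-contained userspace sketch (the pending-frames check is a stand-in, not the driver's ath9k_has_pending_frames()):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define FLUSH_TIMEOUT_MS 60

/* Stand-in for the hardware queue check; here we just count down. */
static int pending = 5;
static bool has_pending_frames(void)
{
	if (pending > 0)
		pending--;
	return pending > 0;
}

/* Wait up to FLUSH_TIMEOUT_MS, in ~1 ms steps, for the queue to drain. */
static bool wait_for_drain(void)
{
	int ms;

	for (ms = 0; ms < FLUSH_TIMEOUT_MS; ms++) {
		if (!has_pending_frames())
			return true;
		usleep(1000);	/* the driver uses usleep_range(1000, 2000) */
	}
	return false;		/* caller must stop DMA and drop the frames */
}

int main(void)
{
	printf("drained cleanly: %s\n", wait_for_drain() ? "yes" : "no");
	return 0;
}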
@@ -2174,8 +2200,7 @@ struct ieee80211_ops ath9k_ops = {
2174 .reset_tsf = ath9k_reset_tsf, 2200 .reset_tsf = ath9k_reset_tsf,
2175 .ampdu_action = ath9k_ampdu_action, 2201 .ampdu_action = ath9k_ampdu_action,
2176 .get_survey = ath9k_get_survey, 2202 .get_survey = ath9k_get_survey,
2177 .sw_scan_start = ath9k_sw_scan_start,
2178 .sw_scan_complete = ath9k_sw_scan_complete,
2179 .rfkill_poll = ath9k_rfkill_poll_state, 2203 .rfkill_poll = ath9k_rfkill_poll_state,
2180 .set_coverage_class = ath9k_set_coverage_class, 2204 .set_coverage_class = ath9k_set_coverage_class,
2205 .flush = ath9k_flush,
2181}; 2206};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 78ef1f13386..e83128c50f7 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -126,7 +126,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
126static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 126static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
127{ 127{
128 void __iomem *mem; 128 void __iomem *mem;
129 struct ath_wiphy *aphy;
130 struct ath_softc *sc; 129 struct ath_softc *sc;
131 struct ieee80211_hw *hw; 130 struct ieee80211_hw *hw;
132 u8 csz; 131 u8 csz;
@@ -198,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
198 goto err_iomap; 197 goto err_iomap;
199 } 198 }
200 199
201 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + 200 hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
202 sizeof(struct ath_softc), &ath9k_ops);
203 if (!hw) { 201 if (!hw) {
204 dev_err(&pdev->dev, "No memory for ieee80211_hw\n"); 202 dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
205 ret = -ENOMEM; 203 ret = -ENOMEM;
@@ -209,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
209 SET_IEEE80211_DEV(hw, &pdev->dev); 207 SET_IEEE80211_DEV(hw, &pdev->dev);
210 pci_set_drvdata(pdev, hw); 208 pci_set_drvdata(pdev, hw);
211 209
212 aphy = hw->priv; 210 sc = hw->priv;
213 sc = (struct ath_softc *) (aphy + 1);
214 aphy->sc = sc;
215 aphy->hw = hw;
216 sc->pri_wiphy = aphy;
217 sc->hw = hw; 211 sc->hw = hw;
218 sc->dev = &pdev->dev; 212 sc->dev = &pdev->dev;
219 sc->mem = mem; 213 sc->mem = mem;
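[Illustrative aside] After this change the probe path allocates the ieee80211_hw private area as a bare ath_softc and reads it straight back from hw->priv, with no ath_wiphy wrapper in between. A generic userspace analogue of that "driver context lives in the trailing private area" layout (plain C with fake types, not the mac80211 API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for ieee80211_hw and ath_softc, for illustration only. */
struct fake_hw {
	void *priv;			/* points at the trailing private area */
	unsigned char priv_area[];	/* flexible array member, must be last */
};

struct fake_softc {
	int nvifs;
};

/* Allocate hw plus priv_len bytes of driver-private data in one block. */
static struct fake_hw *fake_alloc_hw(size_t priv_len)
{
	struct fake_hw *hw = calloc(1, sizeof(*hw) + priv_len);

	if (!hw)
		return NULL;
	hw->priv = hw->priv_area;
	return hw;
}

int main(void)
{
	struct fake_hw *hw = fake_alloc_hw(sizeof(struct fake_softc));
	struct fake_softc *sc;

	if (!hw)
		return 1;

	/* With the wrapper gone, hw->priv is the softc itself. */
	sc = hw->priv;
	sc->nvifs = 1;
	printf("nvifs = %d\n", sc->nvifs);

	free(hw);
	return 0;
}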
@@ -260,8 +254,7 @@ err_dma:
260static void ath_pci_remove(struct pci_dev *pdev) 254static void ath_pci_remove(struct pci_dev *pdev)
261{ 255{
262 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 256 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
263 struct ath_wiphy *aphy = hw->priv; 257 struct ath_softc *sc = hw->priv;
264 struct ath_softc *sc = aphy->sc;
265 void __iomem *mem = sc->mem; 258 void __iomem *mem = sc->mem;
266 259
267 if (!is_ath9k_unloaded) 260 if (!is_ath9k_unloaded)
@@ -281,8 +274,7 @@ static int ath_pci_suspend(struct device *device)
281{ 274{
282 struct pci_dev *pdev = to_pci_dev(device); 275 struct pci_dev *pdev = to_pci_dev(device);
283 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 276 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
284 struct ath_wiphy *aphy = hw->priv; 277 struct ath_softc *sc = hw->priv;
285 struct ath_softc *sc = aphy->sc;
286 278
287 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 279 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
288 280
@@ -293,8 +285,7 @@ static int ath_pci_resume(struct device *device)
293{ 285{
294 struct pci_dev *pdev = to_pci_dev(device); 286 struct pci_dev *pdev = to_pci_dev(device);
295 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 287 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
296 struct ath_wiphy *aphy = hw->priv; 288 struct ath_softc *sc = hw->priv;
297 struct ath_softc *sc = aphy->sc;
298 u32 val; 289 u32 val;
299 290
300 /* 291 /*
@@ -320,7 +311,6 @@ static int ath_pci_resume(struct device *device)
320 ath9k_ps_restore(sc); 311 ath9k_ps_restore(sc);
321 312
322 sc->ps_idle = true; 313 sc->ps_idle = true;
323 ath9k_set_wiphy_idle(aphy, true);
324 ath_radio_disable(sc, hw); 314 ath_radio_disable(sc, hw);
325 315
326 return 0; 316 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e45147820ea..960d717ca7c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1560,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
1560 1560
1561static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 1561static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
1562{ 1562{
1563 struct ath_wiphy *aphy = hw->priv; 1563 return hw->priv;
1564 return aphy->sc;
1565} 1564}
1566 1565
1567static void ath_rate_free(void *priv) 1566static void ath_rate_free(void *priv)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index b2497b8601e..daf171d2f61 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
34 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 34 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
35} 35}
36 36
37static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
38 struct ieee80211_hdr *hdr)
39{
40 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
41 int i;
42
43 spin_lock_bh(&sc->wiphy_lock);
44 for (i = 0; i < sc->num_sec_wiphy; i++) {
45 struct ath_wiphy *aphy = sc->sec_wiphy[i];
46 if (aphy == NULL)
47 continue;
48 if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
49 == 0) {
50 hw = aphy->hw;
51 break;
52 }
53 }
54 spin_unlock_bh(&sc->wiphy_lock);
55 return hw;
56}
57
58/* 37/*
59 * Setup and link descriptors. 38 * Setup and link descriptors.
60 * 39 *
@@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
230 int error = 0, i; 209 int error = 0, i;
231 u32 size; 210 u32 size;
232 211
233
234 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
235 ah->caps.rx_status_len,
236 min(common->cachelsz, (u16)64));
237
238 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 212 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
239 ah->caps.rx_status_len); 213 ah->caps.rx_status_len);
240 214
@@ -321,12 +295,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
321 sc->sc_flags &= ~SC_OP_RXFLUSH; 295 sc->sc_flags &= ~SC_OP_RXFLUSH;
322 spin_lock_init(&sc->rx.rxbuflock); 296 spin_lock_init(&sc->rx.rxbuflock);
323 297
298 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
299 sc->sc_ah->caps.rx_status_len;
300
324 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 301 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
325 return ath_rx_edma_init(sc, nbufs); 302 return ath_rx_edma_init(sc, nbufs);
326 } else { 303 } else {
327 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
328 min(common->cachelsz, (u16)64));
329
330 ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 304 ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
331 common->cachelsz, common->rx_bufsize); 305 common->cachelsz, common->rx_bufsize);
332 306
@@ -463,8 +437,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
463 if (conf_is_ht(&sc->hw->conf)) 437 if (conf_is_ht(&sc->hw->conf))
464 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 438 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
465 439
466 if (sc->sec_wiphy || (sc->nvifs > 1) || 440 if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
467 (sc->rx.rxfilter & FIF_OTHER_BSS)) {
468 /* The following may also be needed for other older chips */ 441 /* The following may also be needed for other older chips */
469 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 442 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
470 rfilt |= ATH9K_RX_FILTER_PROM; 443 rfilt |= ATH9K_RX_FILTER_PROM;
@@ -588,8 +561,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
588 return; 561 return;
589 562
590 mgmt = (struct ieee80211_mgmt *)skb->data; 563 mgmt = (struct ieee80211_mgmt *)skb->data;
591 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 564 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
565 /* TODO: This doesn't work well if you have stations
566 * associated to two different APs because curbssid
567 * is just the last AP that any of the stations associated
568 * with.
569 */
592 return; /* not from our current AP */ 570 return; /* not from our current AP */
571 }
593 572
594 sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 573 sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
595 574
@@ -662,37 +641,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
662 } 641 }
663} 642}
664 643
665static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
666 struct ath_softc *sc, struct sk_buff *skb)
667{
668 struct ieee80211_hdr *hdr;
669
670 hdr = (struct ieee80211_hdr *)skb->data;
671
672 /* Send the frame to mac80211 */
673 if (is_multicast_ether_addr(hdr->addr1)) {
674 int i;
675 /*
676 * Deliver broadcast/multicast frames to all suitable
677 * virtual wiphys.
678 */
679 /* TODO: filter based on channel configuration */
680 for (i = 0; i < sc->num_sec_wiphy; i++) {
681 struct ath_wiphy *aphy = sc->sec_wiphy[i];
682 struct sk_buff *nskb;
683 if (aphy == NULL)
684 continue;
685 nskb = skb_copy(skb, GFP_ATOMIC);
686 if (!nskb)
687 continue;
688 ieee80211_rx(aphy->hw, nskb);
689 }
690 ieee80211_rx(sc->hw, skb);
691 } else
692 /* Deliver unicast frames based on receiver address */
693 ieee80211_rx(hw, skb);
694}
695
696static bool ath_edma_get_buffers(struct ath_softc *sc, 644static bool ath_edma_get_buffers(struct ath_softc *sc,
697 enum ath9k_rx_qtype qtype) 645 enum ath9k_rx_qtype qtype)
698{ 646{
@@ -862,15 +810,9 @@ static bool ath9k_rx_accept(struct ath_common *common,
862 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 810 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
863 return false; 811 return false;
864 812
865 /* 813 /* Only use error bits from the last fragment */
866 * rs_more indicates chained descriptors which can be used
867 * to link buffers together for a sort of scatter-gather
868 * operation.
869 * reject the frame, we don't support scatter-gather yet and
870 * the frame is probably corrupt anyway
871 */
872 if (rx_stats->rs_more) 814 if (rx_stats->rs_more)
873 return false; 815 return true;
874 816
875 /* 817 /*
876 * The rx_stats->rs_status will not be set until the end of the 818 * The rx_stats->rs_status will not be set until the end of the
@@ -974,7 +916,7 @@ static void ath9k_process_rssi(struct ath_common *common,
974 struct ieee80211_hdr *hdr, 916 struct ieee80211_hdr *hdr,
975 struct ath_rx_status *rx_stats) 917 struct ath_rx_status *rx_stats)
976{ 918{
977 struct ath_wiphy *aphy = hw->priv; 919 struct ath_softc *sc = hw->priv;
978 struct ath_hw *ah = common->ah; 920 struct ath_hw *ah = common->ah;
979 int last_rssi; 921 int last_rssi;
980 __le16 fc; 922 __le16 fc;
@@ -984,13 +926,19 @@ static void ath9k_process_rssi(struct ath_common *common,
984 926
985 fc = hdr->frame_control; 927 fc = hdr->frame_control;
986 if (!ieee80211_is_beacon(fc) || 928 if (!ieee80211_is_beacon(fc) ||
987 compare_ether_addr(hdr->addr3, common->curbssid)) 929 compare_ether_addr(hdr->addr3, common->curbssid)) {
930 /* TODO: This doesn't work well if you have stations
931 * associated to two different APs because curbssid
932 * is just the last AP that any of the stations associated
933 * with.
934 */
988 return; 935 return;
936 }
989 937
990 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) 938 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
991 ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi); 939 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
992 940
993 last_rssi = aphy->last_rssi; 941 last_rssi = sc->last_rssi;
994 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 942 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
995 rx_stats->rs_rssi = ATH_EP_RND(last_rssi, 943 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
996 ATH_RSSI_EP_MULTIPLIER); 944 ATH_RSSI_EP_MULTIPLIER);
@@ -1022,6 +970,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
1022 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 970 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
1023 return -EINVAL; 971 return -EINVAL;
1024 972
973 /* Only use status info from the last fragment */
974 if (rx_stats->rs_more)
975 return 0;
976
1025 ath9k_process_rssi(common, hw, hdr, rx_stats); 977 ath9k_process_rssi(common, hw, hdr, rx_stats);
1026 978
1027 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 979 if (ath9k_process_rate(common, hw, rx_stats, rx_status))
@@ -1623,7 +1575,7 @@ div_comb_done:
1623int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1575int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1624{ 1576{
1625 struct ath_buf *bf; 1577 struct ath_buf *bf;
1626 struct sk_buff *skb = NULL, *requeue_skb; 1578 struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
1627 struct ieee80211_rx_status *rxs; 1579 struct ieee80211_rx_status *rxs;
1628 struct ath_hw *ah = sc->sc_ah; 1580 struct ath_hw *ah = sc->sc_ah;
1629 struct ath_common *common = ath9k_hw_common(ah); 1581 struct ath_common *common = ath9k_hw_common(ah);
@@ -1632,7 +1584,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1632 * virtual wiphy so to account for that we iterate over the active 1584 * virtual wiphy so to account for that we iterate over the active
1633 * wiphys and find the appropriate wiphy and therefore hw. 1585 * wiphys and find the appropriate wiphy and therefore hw.
1634 */ 1586 */
1635 struct ieee80211_hw *hw = NULL; 1587 struct ieee80211_hw *hw = sc->hw;
1636 struct ieee80211_hdr *hdr; 1588 struct ieee80211_hdr *hdr;
1637 int retval; 1589 int retval;
1638 bool decrypt_error = false; 1590 bool decrypt_error = false;
@@ -1674,10 +1626,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1674 if (!skb) 1626 if (!skb)
1675 continue; 1627 continue;
1676 1628
1677 hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len); 1629 /*
1678 rxs = IEEE80211_SKB_RXCB(skb); 1630 * Take frame header from the first fragment and RX status from
1631 * the last one.
1632 */
1633 if (sc->rx.frag)
1634 hdr_skb = sc->rx.frag;
1635 else
1636 hdr_skb = skb;
1679 1637
1680 hw = ath_get_virt_hw(sc, hdr); 1638 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1639 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1681 1640
1682 ath_debug_stat_rx(sc, &rs); 1641 ath_debug_stat_rx(sc, &rs);
1683 1642
@@ -1686,12 +1645,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1686 * chain it back at the queue without processing it. 1645 * chain it back at the queue without processing it.
1687 */ 1646 */
1688 if (flush) 1647 if (flush)
1689 goto requeue; 1648 goto requeue_drop_frag;
1690 1649
1691 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1650 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1692 rxs, &decrypt_error); 1651 rxs, &decrypt_error);
1693 if (retval) 1652 if (retval)
1694 goto requeue; 1653 goto requeue_drop_frag;
1695 1654
1696 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1655 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1697 if (rs.rs_tstamp > tsf_lower && 1656 if (rs.rs_tstamp > tsf_lower &&
@@ -1711,7 +1670,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1711 * skb and put it at the tail of the sc->rx.rxbuf list for 1670 * skb and put it at the tail of the sc->rx.rxbuf list for
1712 * processing. */ 1671 * processing. */
1713 if (!requeue_skb) 1672 if (!requeue_skb)
1714 goto requeue; 1673 goto requeue_drop_frag;
1715 1674
1716 /* Unmap the frame */ 1675 /* Unmap the frame */
1717 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1676 dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1722,8 +1681,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1722 if (ah->caps.rx_status_len) 1681 if (ah->caps.rx_status_len)
1723 skb_pull(skb, ah->caps.rx_status_len); 1682 skb_pull(skb, ah->caps.rx_status_len);
1724 1683
1725 ath9k_rx_skb_postprocess(common, skb, &rs, 1684 if (!rs.rs_more)
1726 rxs, decrypt_error); 1685 ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1686 rxs, decrypt_error);
1727 1687
1728 /* We will now give hardware our shiny new allocated skb */ 1688 /* We will now give hardware our shiny new allocated skb */
1729 bf->bf_mpdu = requeue_skb; 1689 bf->bf_mpdu = requeue_skb;
@@ -1736,10 +1696,42 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1736 bf->bf_mpdu = NULL; 1696 bf->bf_mpdu = NULL;
1737 bf->bf_buf_addr = 0; 1697 bf->bf_buf_addr = 0;
1738 ath_err(common, "dma_mapping_error() on RX\n"); 1698 ath_err(common, "dma_mapping_error() on RX\n");
1739 ath_rx_send_to_mac80211(hw, sc, skb); 1699 ieee80211_rx(hw, skb);
1740 break; 1700 break;
1741 } 1701 }
1742 1702
1703 if (rs.rs_more) {
1704 /*
1705 * rs_more indicates chained descriptors which can be
1706 * used to link buffers together for a sort of
1707 * scatter-gather operation.
1708 */
1709 if (sc->rx.frag) {
1710 /* too many fragments - cannot handle frame */
1711 dev_kfree_skb_any(sc->rx.frag);
1712 dev_kfree_skb_any(skb);
1713 skb = NULL;
1714 }
1715 sc->rx.frag = skb;
1716 goto requeue;
1717 }
1718
1719 if (sc->rx.frag) {
1720 int space = skb->len - skb_tailroom(hdr_skb);
1721
1722 sc->rx.frag = NULL;
1723
1724 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1725 dev_kfree_skb(skb);
1726 goto requeue_drop_frag;
1727 }
1728
1729 skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
1730 skb->len);
1731 dev_kfree_skb_any(skb);
1732 skb = hdr_skb;
1733 }
1734
1743 /* 1735 /*
1744 * change the default rx antenna if rx diversity chooses the 1736 * change the default rx antenna if rx diversity chooses the
1745 * other antenna 3 times in a row. 1737 * other antenna 3 times in a row.
@@ -1763,8 +1755,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1763 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) 1755 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1764 ath_ant_comb_scan(sc, &rs); 1756 ath_ant_comb_scan(sc, &rs);
1765 1757
1766 ath_rx_send_to_mac80211(hw, sc, skb); 1758 ieee80211_rx(hw, skb);
1767 1759
1760requeue_drop_frag:
1761 if (sc->rx.frag) {
1762 dev_kfree_skb_any(sc->rx.frag);
1763 sc->rx.frag = NULL;
1764 }
1768requeue: 1765requeue:
1769 if (edma) { 1766 if (edma) {
1770 list_add_tail(&bf->list, &sc->rx.rxbuf); 1767 list_add_tail(&bf->list, &sc->rx.rxbuf);
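[Illustrative aside] The rs_more handling above keeps the first fragment in sc->rx.frag, then grows the head skb (pskb_expand_head) and appends the follow-up buffer's data before handing the reassembled frame to mac80211. A rough userspace equivalent of that grow-and-append step, using malloc/realloc instead of skb helpers (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct frag_buf {
	unsigned char *data;
	size_t len;
};

/* Append 'more' to 'head', growing the buffer as pskb_expand_head() would. */
static int frag_append(struct frag_buf *head, const unsigned char *more,
		       size_t more_len)
{
	unsigned char *p = realloc(head->data, head->len + more_len);

	if (!p)
		return -1;	/* on failure the driver drops the whole frame */
	memcpy(p + head->len, more, more_len);
	head->data = p;
	head->len += more_len;
	return 0;
}

int main(void)
{
	struct frag_buf head = { .data = malloc(4), .len = 4 };

	if (!head.data)
		return 1;
	memcpy(head.data, "abcd", 4);

	if (frag_append(&head, (const unsigned char *)"efgh", 4) == 0)
		printf("reassembled %zu bytes\n", head.len);

	free(head.data);
	return 0;
}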
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4df5659c6c1..64b226a78b2 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -789,6 +789,7 @@
789#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ 789#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
790#define AR_SREV_VERSION_9485 0x240 790#define AR_SREV_VERSION_9485 0x240
791#define AR_SREV_REVISION_9485_10 0 791#define AR_SREV_REVISION_9485_10 0
792#define AR_SREV_REVISION_9485_11 1
792 793
793#define AR_SREV_5416(_ah) \ 794#define AR_SREV_5416(_ah) \
794 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 795 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -866,6 +867,9 @@
866#define AR_SREV_9485_10(_ah) \ 867#define AR_SREV_9485_10(_ah) \
867 (AR_SREV_9485(_ah) && \ 868 (AR_SREV_9485(_ah) && \
868 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10)) 869 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
870#define AR_SREV_9485_11(_ah) \
871 (AR_SREV_9485(_ah) && \
872 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
869 873
870#define AR_SREV_9285E_20(_ah) \ 874#define AR_SREV_9285E_20(_ah) \
871 (AR_SREV_9285_12_OR_LATER(_ah) && \ 875 (AR_SREV_9285_12_OR_LATER(_ah) && \
@@ -1083,6 +1087,17 @@ enum {
1083#define AR_ENT_OTP 0x40d8 1087#define AR_ENT_OTP 0x40d8
1084#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000 1088#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
1085#define AR_ENT_OTP_MPSD 0x00800000 1089#define AR_ENT_OTP_MPSD 0x00800000
1090#define AR_CH0_BB_DPLL2 0x16184
1091#define AR_CH0_BB_DPLL3 0x16188
1092#define AR_CH0_DDR_DPLL2 0x16244
1093#define AR_CH0_DDR_DPLL3 0x16248
1094#define AR_CH0_DPLL2_KD 0x03F80000
1095#define AR_CH0_DPLL2_KD_S 19
1096#define AR_CH0_DPLL2_KI 0x3C000000
1097#define AR_CH0_DPLL2_KI_S 26
1098#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000
1099#define AR_CH0_DPLL3_PHASE_SHIFT_S 23
1100#define AR_PHY_CCA_NOM_VAL_2GHZ -118
1086 1101
1087#define AR_RTC_9300_PLL_DIV 0x000003ff 1102#define AR_RTC_9300_PLL_DIV 0x000003ff
1088#define AR_RTC_9300_PLL_DIV_S 0 1103#define AR_RTC_9300_PLL_DIV_S 0
@@ -1129,6 +1144,12 @@ enum {
1129#define AR_RTC_PLL_CLKSEL 0x00000300 1144#define AR_RTC_PLL_CLKSEL 0x00000300
1130#define AR_RTC_PLL_CLKSEL_S 8 1145#define AR_RTC_PLL_CLKSEL_S 8
1131 1146
1147#define PLL3 0x16188
1148#define PLL3_DO_MEAS_MASK 0x40000000
1149#define PLL4 0x1618c
1150#define PLL4_MEAS_DONE 0x8
1151#define SQSUM_DVC_MASK 0x007ffff8
1152
1132#define AR_RTC_RESET \ 1153#define AR_RTC_RESET \
1133 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040) 1154 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
1134#define AR_RTC_RESET_EN (0x00000001) 1155#define AR_RTC_RESET_EN (0x00000001)
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
deleted file mode 100644
index 2dc7095e56d..00000000000
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ /dev/null
@@ -1,717 +0,0 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18
19#include "ath9k.h"
20
21struct ath9k_vif_iter_data {
22 const u8 *hw_macaddr;
23 u8 mask[ETH_ALEN];
24};
25
26static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
27{
28 struct ath9k_vif_iter_data *iter_data = data;
29 int i;
30
31 for (i = 0; i < ETH_ALEN; i++)
32 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
33}
34
35void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
36{
37 struct ath_wiphy *aphy = hw->priv;
38 struct ath_softc *sc = aphy->sc;
39 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
40 struct ath9k_vif_iter_data iter_data;
41 int i;
42
43 /*
44 * Use the hardware MAC address as reference, the hardware uses it
45 * together with the BSSID mask when matching addresses.
46 */
47 iter_data.hw_macaddr = common->macaddr;
48 memset(&iter_data.mask, 0xff, ETH_ALEN);
49
50 if (vif)
51 ath9k_vif_iter(&iter_data, vif->addr, vif);
52
53 /* Get list of all active MAC addresses */
54 spin_lock_bh(&sc->wiphy_lock);
55 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
56 &iter_data);
57 for (i = 0; i < sc->num_sec_wiphy; i++) {
58 if (sc->sec_wiphy[i] == NULL)
59 continue;
60 ieee80211_iterate_active_interfaces_atomic(
61 sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
62 }
63 spin_unlock_bh(&sc->wiphy_lock);
64
65 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
66 ath_hw_setbssidmask(common);
67}
68
69int ath9k_wiphy_add(struct ath_softc *sc)
70{
71 int i, error;
72 struct ath_wiphy *aphy;
73 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
74 struct ieee80211_hw *hw;
75 u8 addr[ETH_ALEN];
76
77 hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
78 if (hw == NULL)
79 return -ENOMEM;
80
81 spin_lock_bh(&sc->wiphy_lock);
82 for (i = 0; i < sc->num_sec_wiphy; i++) {
83 if (sc->sec_wiphy[i] == NULL)
84 break;
85 }
86
87 if (i == sc->num_sec_wiphy) {
88 /* No empty slot available; increase array length */
89 struct ath_wiphy **n;
90 n = krealloc(sc->sec_wiphy,
91 (sc->num_sec_wiphy + 1) *
92 sizeof(struct ath_wiphy *),
93 GFP_ATOMIC);
94 if (n == NULL) {
95 spin_unlock_bh(&sc->wiphy_lock);
96 ieee80211_free_hw(hw);
97 return -ENOMEM;
98 }
99 n[i] = NULL;
100 sc->sec_wiphy = n;
101 sc->num_sec_wiphy++;
102 }
103
104 SET_IEEE80211_DEV(hw, sc->dev);
105
106 aphy = hw->priv;
107 aphy->sc = sc;
108 aphy->hw = hw;
109 sc->sec_wiphy[i] = aphy;
110 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
111 spin_unlock_bh(&sc->wiphy_lock);
112
113 memcpy(addr, common->macaddr, ETH_ALEN);
114 addr[0] |= 0x02; /* Locally managed address */
115 /*
116 * XOR virtual wiphy index into the least significant bits to generate
117 * a different MAC address for each virtual wiphy.
118 */
119 addr[5] ^= i & 0xff;
120 addr[4] ^= (i & 0xff00) >> 8;
121 addr[3] ^= (i & 0xff0000) >> 16;
122
123 SET_IEEE80211_PERM_ADDR(hw, addr);
124
125 ath9k_set_hw_capab(sc, hw);
126
127 error = ieee80211_register_hw(hw);
128
129 if (error == 0) {
130 /* Make sure wiphy scheduler is started (if enabled) */
131 ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
132 }
133
134 return error;
135}
136
137int ath9k_wiphy_del(struct ath_wiphy *aphy)
138{
139 struct ath_softc *sc = aphy->sc;
140 int i;
141
142 spin_lock_bh(&sc->wiphy_lock);
143 for (i = 0; i < sc->num_sec_wiphy; i++) {
144 if (aphy == sc->sec_wiphy[i]) {
145 sc->sec_wiphy[i] = NULL;
146 spin_unlock_bh(&sc->wiphy_lock);
147 ieee80211_unregister_hw(aphy->hw);
148 ieee80211_free_hw(aphy->hw);
149 return 0;
150 }
151 }
152 spin_unlock_bh(&sc->wiphy_lock);
153 return -ENOENT;
154}
155
156static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
157 struct ieee80211_vif *vif, const u8 *bssid,
158 int ps)
159{
160 struct ath_softc *sc = aphy->sc;
161 struct ath_tx_control txctl;
162 struct sk_buff *skb;
163 struct ieee80211_hdr *hdr;
164 __le16 fc;
165 struct ieee80211_tx_info *info;
166
167 skb = dev_alloc_skb(24);
168 if (skb == NULL)
169 return -ENOMEM;
170 hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
171 memset(hdr, 0, 24);
172 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
173 IEEE80211_FCTL_TODS);
174 if (ps)
175 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
176 hdr->frame_control = fc;
177 memcpy(hdr->addr1, bssid, ETH_ALEN);
178 memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
179 memcpy(hdr->addr3, bssid, ETH_ALEN);
180
181 info = IEEE80211_SKB_CB(skb);
182 memset(info, 0, sizeof(*info));
183 info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
184 info->control.vif = vif;
185 info->control.rates[0].idx = 0;
186 info->control.rates[0].count = 4;
187 info->control.rates[1].idx = -1;
188
189 memset(&txctl, 0, sizeof(struct ath_tx_control));
190 txctl.txq = sc->tx.txq_map[WME_AC_VO];
191 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
192
193 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
194 goto exit;
195
196 return 0;
197exit:
198 dev_kfree_skb_any(skb);
199 return -1;
200}
201
202static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
203{
204 int i;
205 if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
206 return true;
207 for (i = 0; i < sc->num_sec_wiphy; i++) {
208 if (sc->sec_wiphy[i] &&
209 sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
210 return true;
211 }
212 return false;
213}
214
215static bool ath9k_wiphy_pausing(struct ath_softc *sc)
216{
217 bool ret;
218 spin_lock_bh(&sc->wiphy_lock);
219 ret = __ath9k_wiphy_pausing(sc);
220 spin_unlock_bh(&sc->wiphy_lock);
221 return ret;
222}
223
224static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
225{
226 int i;
227 if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
228 return true;
229 for (i = 0; i < sc->num_sec_wiphy; i++) {
230 if (sc->sec_wiphy[i] &&
231 sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
232 return true;
233 }
234 return false;
235}
236
237bool ath9k_wiphy_scanning(struct ath_softc *sc)
238{
239 bool ret;
240 spin_lock_bh(&sc->wiphy_lock);
241 ret = __ath9k_wiphy_scanning(sc);
242 spin_unlock_bh(&sc->wiphy_lock);
243 return ret;
244}
245
246static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
247
248/* caller must hold wiphy_lock */
249static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
250{
251 if (aphy == NULL)
252 return;
253 if (aphy->chan_idx != aphy->sc->chan_idx)
254 return; /* wiphy not on the selected channel */
255 __ath9k_wiphy_unpause(aphy);
256}
257
258static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
259{
260 int i;
261 spin_lock_bh(&sc->wiphy_lock);
262 __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
263 for (i = 0; i < sc->num_sec_wiphy; i++)
264 __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
265 spin_unlock_bh(&sc->wiphy_lock);
266}
267
268void ath9k_wiphy_chan_work(struct work_struct *work)
269{
270 struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
271 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
272 struct ath_wiphy *aphy = sc->next_wiphy;
273
274 if (aphy == NULL)
275 return;
276
277 /*
278 * All pending interfaces paused; ready to change
279 * channels.
280 */
281
282 /* Change channels */
283 mutex_lock(&sc->mutex);
284 /* XXX: remove me eventually */
285 ath9k_update_ichannel(sc, aphy->hw,
286 &sc->sc_ah->channels[sc->chan_idx]);
287
288 /* sync hw configuration for hw code */
289 common->hw = aphy->hw;
290
291 if (ath_set_channel(sc, aphy->hw,
292 &sc->sc_ah->channels[sc->chan_idx]) < 0) {
293 printk(KERN_DEBUG "ath9k: Failed to set channel for new "
294 "virtual wiphy\n");
295 mutex_unlock(&sc->mutex);
296 return;
297 }
298 mutex_unlock(&sc->mutex);
299
300 ath9k_wiphy_unpause_channel(sc);
301}
302
303/*
304 * ath9k version of ieee80211_tx_status() for TX frames that are generated
305 * internally in the driver.
306 */
307void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
308{
309 struct ath_wiphy *aphy = hw->priv;
310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
311
312 if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
313 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
314 printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
315 "frame\n", wiphy_name(hw->wiphy));
316 /*
317 * The AP did not reply; ignore this to allow us to
318 * continue.
319 */
320 }
321 aphy->state = ATH_WIPHY_PAUSED;
322 if (!ath9k_wiphy_pausing(aphy->sc)) {
323 /*
324 * Drop from tasklet to work to allow mutex for channel
325 * change.
326 */
327 ieee80211_queue_work(aphy->sc->hw,
328 &aphy->sc->chan_work);
329 }
330 }
331
332 dev_kfree_skb(skb);
333}
334
335static void ath9k_mark_paused(struct ath_wiphy *aphy)
336{
337 struct ath_softc *sc = aphy->sc;
338 aphy->state = ATH_WIPHY_PAUSED;
339 if (!__ath9k_wiphy_pausing(sc))
340 ieee80211_queue_work(sc->hw, &sc->chan_work);
341}
342
343static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
344{
345 struct ath_wiphy *aphy = data;
346 struct ath_vif *avp = (void *) vif->drv_priv;
347
348 switch (vif->type) {
349 case NL80211_IFTYPE_STATION:
350 if (!vif->bss_conf.assoc) {
351 ath9k_mark_paused(aphy);
352 break;
353 }
354 /* TODO: could avoid this if already in PS mode */
355 if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
356 printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
357 __func__);
358 ath9k_mark_paused(aphy);
359 }
360 break;
361 case NL80211_IFTYPE_AP:
362 /* Beacon transmission is paused by aphy->state change */
363 ath9k_mark_paused(aphy);
364 break;
365 default:
366 break;
367 }
368}
369
370/* caller must hold wiphy_lock */
371static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
372{
373 ieee80211_stop_queues(aphy->hw);
374 aphy->state = ATH_WIPHY_PAUSING;
375 /*
376 * TODO: handle PAUSING->PAUSED for the case where there are multiple
377 * active vifs (now we do it on the first vif getting ready; should be
378 * on the last)
379 */
380 ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
381 aphy);
382 return 0;
383}
384
385int ath9k_wiphy_pause(struct ath_wiphy *aphy)
386{
387 int ret;
388 spin_lock_bh(&aphy->sc->wiphy_lock);
389 ret = __ath9k_wiphy_pause(aphy);
390 spin_unlock_bh(&aphy->sc->wiphy_lock);
391 return ret;
392}
393
394static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
395{
396 struct ath_wiphy *aphy = data;
397 struct ath_vif *avp = (void *) vif->drv_priv;
398
399 switch (vif->type) {
400 case NL80211_IFTYPE_STATION:
401 if (!vif->bss_conf.assoc)
402 break;
403 ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
404 break;
405 case NL80211_IFTYPE_AP:
406 /* Beacon transmission is re-enabled by aphy->state change */
407 break;
408 default:
409 break;
410 }
411}
412
413/* caller must hold wiphy_lock */
414static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
415{
416 ieee80211_iterate_active_interfaces_atomic(aphy->hw,
417 ath9k_unpause_iter, aphy);
418 aphy->state = ATH_WIPHY_ACTIVE;
419 ieee80211_wake_queues(aphy->hw);
420 return 0;
421}
422
423int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
424{
425 int ret;
426 spin_lock_bh(&aphy->sc->wiphy_lock);
427 ret = __ath9k_wiphy_unpause(aphy);
428 spin_unlock_bh(&aphy->sc->wiphy_lock);
429 return ret;
430}
431
432static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
433{
434 int i;
435 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
436 sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
437 for (i = 0; i < sc->num_sec_wiphy; i++) {
438 if (sc->sec_wiphy[i] &&
439 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
440 sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
441 }
442}
443
444/* caller must hold wiphy_lock */
445static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
446{
447 int i;
448 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
449 __ath9k_wiphy_pause(sc->pri_wiphy);
450 for (i = 0; i < sc->num_sec_wiphy; i++) {
451 if (sc->sec_wiphy[i] &&
452 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
453 __ath9k_wiphy_pause(sc->sec_wiphy[i]);
454 }
455}
456
457int ath9k_wiphy_select(struct ath_wiphy *aphy)
458{
459 struct ath_softc *sc = aphy->sc;
460 bool now;
461
462 spin_lock_bh(&sc->wiphy_lock);
463 if (__ath9k_wiphy_scanning(sc)) {
464 /*
465 * For now, we are using mac80211 sw scan and it expects to
466 * have full control over channel changes, so avoid wiphy
467 * scheduling during a scan. This could be optimized if the
468 * scanning control were moved into the driver.
469 */
470 spin_unlock_bh(&sc->wiphy_lock);
471 return -EBUSY;
472 }
473 if (__ath9k_wiphy_pausing(sc)) {
474 if (sc->wiphy_select_failures == 0)
475 sc->wiphy_select_first_fail = jiffies;
476 sc->wiphy_select_failures++;
477 if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
478 {
479 printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
480 "out; disable/enable hw to recover\n");
481 __ath9k_wiphy_mark_all_paused(sc);
482 /*
483 * TODO: this workaround to fix hardware is unlikely to
484 * be specific to virtual wiphy changes. It can happen
485 * on normal channel change, too, and as such, this
486 * should really be made more generic. For example,
487			 * trigger radio disable/enable on GTT interrupt burst
488 * (say, 10 GTT interrupts received without any TX
489 * frame being completed)
490 */
491 spin_unlock_bh(&sc->wiphy_lock);
492 ath_radio_disable(sc, aphy->hw);
493 ath_radio_enable(sc, aphy->hw);
494 /* Only the primary wiphy hw is used for queuing work */
495 ieee80211_queue_work(aphy->sc->hw,
496 &aphy->sc->chan_work);
497 return -EBUSY; /* previous select still in progress */
498 }
499 spin_unlock_bh(&sc->wiphy_lock);
500 return -EBUSY; /* previous select still in progress */
501 }
502 sc->wiphy_select_failures = 0;
503
504 /* Store the new channel */
505 sc->chan_idx = aphy->chan_idx;
506 sc->chan_is_ht = aphy->chan_is_ht;
507 sc->next_wiphy = aphy;
508
509 __ath9k_wiphy_pause_all(sc);
510 now = !__ath9k_wiphy_pausing(aphy->sc);
511 spin_unlock_bh(&sc->wiphy_lock);
512
513 if (now) {
514 /* Ready to request channel change immediately */
515 ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
516 }
517
518 /*
519 * wiphys will be unpaused in ath9k_tx_status() once channel has been
520 * changed if any wiphy needs time to become paused.
521 */
522
523 return 0;
524}
525
526bool ath9k_wiphy_started(struct ath_softc *sc)
527{
528 int i;
529 spin_lock_bh(&sc->wiphy_lock);
530 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
531 spin_unlock_bh(&sc->wiphy_lock);
532 return true;
533 }
534 for (i = 0; i < sc->num_sec_wiphy; i++) {
535 if (sc->sec_wiphy[i] &&
536 sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
537 spin_unlock_bh(&sc->wiphy_lock);
538 return true;
539 }
540 }
541 spin_unlock_bh(&sc->wiphy_lock);
542 return false;
543}
544
545static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
546 struct ath_wiphy *selected)
547{
548 if (selected->state == ATH_WIPHY_SCAN) {
549 if (aphy == selected)
550 return;
551 /*
552 * Pause all other wiphys for the duration of the scan even if
553 * they are on the current channel now.
554 */
555 } else if (aphy->chan_idx == selected->chan_idx)
556 return;
557 aphy->state = ATH_WIPHY_PAUSED;
558 ieee80211_stop_queues(aphy->hw);
559}
560
561void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
562 struct ath_wiphy *selected)
563{
564 int i;
565 spin_lock_bh(&sc->wiphy_lock);
566 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
567 ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
568 for (i = 0; i < sc->num_sec_wiphy; i++) {
569 if (sc->sec_wiphy[i] &&
570 sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
571 ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
572 }
573 spin_unlock_bh(&sc->wiphy_lock);
574}
575
576void ath9k_wiphy_work(struct work_struct *work)
577{
578 struct ath_softc *sc = container_of(work, struct ath_softc,
579 wiphy_work.work);
580 struct ath_wiphy *aphy = NULL;
581 bool first = true;
582
583 spin_lock_bh(&sc->wiphy_lock);
584
585 if (sc->wiphy_scheduler_int == 0) {
586 /* wiphy scheduler is disabled */
587 spin_unlock_bh(&sc->wiphy_lock);
588 return;
589 }
590
591try_again:
592 sc->wiphy_scheduler_index++;
593 while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
594 aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
595 if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
596 break;
597
598 sc->wiphy_scheduler_index++;
599 aphy = NULL;
600 }
601 if (aphy == NULL) {
602 sc->wiphy_scheduler_index = 0;
603 if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
604 if (first) {
605 first = false;
606 goto try_again;
607 }
608 /* No wiphy is ready to be scheduled */
609 } else
610 aphy = sc->pri_wiphy;
611 }
612
613 spin_unlock_bh(&sc->wiphy_lock);
614
615 if (aphy &&
616 aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
617 ath9k_wiphy_select(aphy)) {
618 printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
619 "change\n");
620 }
621
622 ieee80211_queue_delayed_work(sc->hw,
623 &sc->wiphy_work,
624 sc->wiphy_scheduler_int);
625}
626
627void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
628{
629 cancel_delayed_work_sync(&sc->wiphy_work);
630 sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
631 if (sc->wiphy_scheduler_int)
632 ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
633 sc->wiphy_scheduler_int);
634}
635
636/* caller must hold wiphy_lock */
637bool ath9k_all_wiphys_idle(struct ath_softc *sc)
638{
639 unsigned int i;
640 if (!sc->pri_wiphy->idle)
641 return false;
642 for (i = 0; i < sc->num_sec_wiphy; i++) {
643 struct ath_wiphy *aphy = sc->sec_wiphy[i];
644 if (!aphy)
645 continue;
646 if (!aphy->idle)
647 return false;
648 }
649 return true;
650}
651
652/* caller must hold wiphy_lock */
653void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
654{
655 struct ath_softc *sc = aphy->sc;
656
657 aphy->idle = idle;
658 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
659 "Marking %s as %sidle\n",
660 wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
661}
662/* Only bother starting a queue on an active virtual wiphy */
663bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
664{
665 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
666 unsigned int i;
667 bool txq_started = false;
668
669 spin_lock_bh(&sc->wiphy_lock);
670
671 /* Start the primary wiphy */
672 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
673 ieee80211_wake_queue(hw, skb_queue);
674 txq_started = true;
675 goto unlock;
676 }
677
678 /* Now start the secondary wiphy queues */
679 for (i = 0; i < sc->num_sec_wiphy; i++) {
680 struct ath_wiphy *aphy = sc->sec_wiphy[i];
681 if (!aphy)
682 continue;
683 if (aphy->state != ATH_WIPHY_ACTIVE)
684 continue;
685
686 hw = aphy->hw;
687 ieee80211_wake_queue(hw, skb_queue);
688 txq_started = true;
689 break;
690 }
691
692unlock:
693 spin_unlock_bh(&sc->wiphy_lock);
694 return txq_started;
695}
696
697/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
698void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
699{
700 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
701 unsigned int i;
702
703 spin_lock_bh(&sc->wiphy_lock);
704
705 /* Stop the primary wiphy */
706 ieee80211_stop_queue(hw, skb_queue);
707
708 /* Now stop the secondary wiphy queues */
709 for (i = 0; i < sc->num_sec_wiphy; i++) {
710 struct ath_wiphy *aphy = sc->sec_wiphy[i];
711 if (!aphy)
712 continue;
713 hw = aphy->hw;
714 ieee80211_stop_queue(hw, skb_queue);
715 }
716 spin_unlock_bh(&sc->wiphy_lock);
717}
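
As a reading aid for the virtual-wiphy code above: ath9k_wiphy_select() marks every active wiphy PAUSING, ath9k_tx_status() flips each one to PAUSED once its pause nullfunc completes, and the channel work then switches channels and unpauses only the wiphys that sit on the newly selected channel. The stand-alone sketch below models just that sequencing; every name in it (toy_wiphy, chan_work, tx_status, and so on) is illustrative and is not part of the driver.

/*
 * Stand-alone sketch of the virtual-wiphy pause/unpause sequencing shown
 * above.  All names and types here are illustrative only; they are not
 * the driver's real structures.
 */
#include <stdio.h>
#include <stdbool.h>

enum wiphy_state { W_INACTIVE, W_ACTIVE, W_PAUSING, W_PAUSED };

struct toy_wiphy {
	enum wiphy_state state;
	int chan_idx;
};

static struct toy_wiphy wiphys[3] = {
	{ W_ACTIVE, 1 }, { W_ACTIVE, 6 }, { W_INACTIVE, 11 },
};
static int cur_chan;

static bool any_pausing(void)
{
	for (int i = 0; i < 3; i++)
		if (wiphys[i].state == W_PAUSING)
			return true;
	return false;
}

/* Equivalent of the chan_work: switch channel, then unpause wiphys on it */
static void chan_work(int new_chan)
{
	cur_chan = new_chan;
	printf("switched to channel %d\n", cur_chan);
	for (int i = 0; i < 3; i++)
		if (wiphys[i].state == W_PAUSED && wiphys[i].chan_idx == cur_chan)
			wiphys[i].state = W_ACTIVE;
}

/* Equivalent of the nullfunc TX-status callback completing a pause */
static void tx_status(int i, int new_chan)
{
	wiphys[i].state = W_PAUSED;
	if (!any_pausing())
		chan_work(new_chan);
}

int main(void)
{
	int target = wiphys[1].chan_idx;

	/* ath9k_wiphy_select(): pause every active wiphy first */
	for (int i = 0; i < 3; i++)
		if (wiphys[i].state == W_ACTIVE)
			wiphys[i].state = W_PAUSING;

	/* pause frames complete one by one; last one triggers the switch */
	tx_status(0, target);
	tx_status(1, target);

	for (int i = 0; i < 3; i++)
		printf("wiphy %d: state %d\n", i, wiphys[i].state);
	return 0;
}
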
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 07b7804aec5..e16136d6179 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -19,7 +19,6 @@
19 19
20#define BITS_PER_BYTE 8 20#define BITS_PER_BYTE 8
21#define OFDM_PLCP_BITS 22 21#define OFDM_PLCP_BITS 22
22#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
23#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) 22#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
24#define L_STF 8 23#define L_STF 8
25#define L_LTF 8 24#define L_LTF 8
@@ -32,7 +31,6 @@
32#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) 31#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
33#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) 32#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
34 33
35#define OFDM_SIFS_TIME 16
36 34
37static u16 bits_per_symbol[][2] = { 35static u16 bits_per_symbol[][2] = {
38 /* 20MHz 40MHz */ 36 /* 20MHz 40MHz */
@@ -57,8 +55,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 55static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 struct list_head *head); 56 struct list_head *head);
59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len); 57static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
60static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 58static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
61 int nframes, int nbad, int txok, bool update_rc); 59 struct ath_tx_status *ts, int nframes, int nbad,
60 int txok, bool update_rc);
62static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 61static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 int seqno); 62 int seqno);
64 63
@@ -169,7 +168,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
169 ath_tx_update_baw(sc, tid, fi->seqno); 168 ath_tx_update_baw(sc, tid, fi->seqno);
170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 169 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
171 } else { 170 } else {
172 ath_tx_send_normal(sc, txq, tid, &bf_head); 171 ath_tx_send_normal(sc, txq, NULL, &bf_head);
173 } 172 }
174 spin_lock_bh(&txq->axq_lock); 173 spin_lock_bh(&txq->axq_lock);
175 } 174 }
@@ -297,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
297 296
298 ATH_TXBUF_RESET(tbf); 297 ATH_TXBUF_RESET(tbf);
299 298
300 tbf->aphy = bf->aphy;
301 tbf->bf_mpdu = bf->bf_mpdu; 299 tbf->bf_mpdu = bf->bf_mpdu;
302 tbf->bf_buf_addr = bf->bf_buf_addr; 300 tbf->bf_buf_addr = bf->bf_buf_addr;
303 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 301 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@ -345,7 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
345 struct ath_node *an = NULL; 343 struct ath_node *an = NULL;
346 struct sk_buff *skb; 344 struct sk_buff *skb;
347 struct ieee80211_sta *sta; 345 struct ieee80211_sta *sta;
348 struct ieee80211_hw *hw; 346 struct ieee80211_hw *hw = sc->hw;
349 struct ieee80211_hdr *hdr; 347 struct ieee80211_hdr *hdr;
350 struct ieee80211_tx_info *tx_info; 348 struct ieee80211_tx_info *tx_info;
351 struct ath_atx_tid *tid = NULL; 349 struct ath_atx_tid *tid = NULL;
@@ -364,7 +362,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
364 hdr = (struct ieee80211_hdr *)skb->data; 362 hdr = (struct ieee80211_hdr *)skb->data;
365 363
366 tx_info = IEEE80211_SKB_CB(skb); 364 tx_info = IEEE80211_SKB_CB(skb);
367 hw = bf->aphy->hw;
368 365
369 memcpy(rates, tx_info->control.rates, sizeof(rates)); 366 memcpy(rates, tx_info->control.rates, sizeof(rates));
370 367
@@ -383,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
383 !bf->bf_stale || bf_next != NULL) 380 !bf->bf_stale || bf_next != NULL)
384 list_move_tail(&bf->list, &bf_head); 381 list_move_tail(&bf->list, &bf_head);
385 382
386 ath_tx_rc_status(bf, ts, 1, 1, 0, false); 383 ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
387 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 384 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
388 0, 0); 385 0, 0);
389 386
@@ -429,7 +426,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
429 426
430 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); 427 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
431 while (bf) { 428 while (bf) {
432 txfail = txpending = 0; 429 txfail = txpending = sendbar = 0;
433 bf_next = bf->bf_next; 430 bf_next = bf->bf_next;
434 431
435 skb = bf->bf_mpdu; 432 skb = bf->bf_mpdu;
@@ -489,10 +486,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
489 486
490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 487 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
491 memcpy(tx_info->control.rates, rates, sizeof(rates)); 488 memcpy(tx_info->control.rates, rates, sizeof(rates));
492 ath_tx_rc_status(bf, ts, nframes, nbad, txok, true); 489 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
493 rc_update = false; 490 rc_update = false;
494 } else { 491 } else {
495 ath_tx_rc_status(bf, ts, nframes, nbad, txok, false); 492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
496 } 493 }
497 494
498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 495 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -516,7 +513,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
516 513
517 bf->bf_state.bf_type |= 514 bf->bf_state.bf_type |=
518 BUF_XRETRY; 515 BUF_XRETRY;
519 ath_tx_rc_status(bf, ts, nframes, 516 ath_tx_rc_status(sc, bf, ts, nframes,
520 nbad, 0, false); 517 nbad, 0, false);
521 ath_tx_complete_buf(sc, bf, txq, 518 ath_tx_complete_buf(sc, bf, txq,
522 &bf_head, 519 &bf_head,
@@ -566,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
566 563
567 rcu_read_unlock(); 564 rcu_read_unlock();
568 565
569 if (needreset) 566 if (needreset) {
567 spin_unlock_bh(&sc->sc_pcu_lock);
570 ath_reset(sc, false); 568 ath_reset(sc, false);
569 spin_lock_bh(&sc->sc_pcu_lock);
570 }
571} 571}
572 572
573static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, 573static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -856,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
856 856
857 txtid->state |= AGGR_ADDBA_PROGRESS; 857 txtid->state |= AGGR_ADDBA_PROGRESS;
858 txtid->paused = true; 858 txtid->paused = true;
859 *ssn = txtid->seq_start; 859 *ssn = txtid->seq_start = txtid->seq_next;
860
861 memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
862 txtid->baw_head = txtid->baw_tail = 0;
860 863
861 return 0; 864 return 0;
862} 865}
@@ -942,7 +945,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
942 [WME_AC_VI] = ATH_TXQ_AC_VI, 945 [WME_AC_VI] = ATH_TXQ_AC_VI,
943 [WME_AC_VO] = ATH_TXQ_AC_VO, 946 [WME_AC_VO] = ATH_TXQ_AC_VO,
944 }; 947 };
945 int qnum, i; 948 int axq_qnum, i;
946 949
947 memset(&qi, 0, sizeof(qi)); 950 memset(&qi, 0, sizeof(qi));
948 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; 951 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
@@ -976,24 +979,25 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
976 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 979 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
977 TXQ_FLAG_TXDESCINT_ENABLE; 980 TXQ_FLAG_TXDESCINT_ENABLE;
978 } 981 }
979 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 982 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
980 if (qnum == -1) { 983 if (axq_qnum == -1) {
981 /* 984 /*
982 * NB: don't print a message, this happens 985 * NB: don't print a message, this happens
983 * normally on parts with too few tx queues 986 * normally on parts with too few tx queues
984 */ 987 */
985 return NULL; 988 return NULL;
986 } 989 }
987 if (qnum >= ARRAY_SIZE(sc->tx.txq)) { 990 if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
988 ath_err(common, "qnum %u out of range, max %zu!\n", 991 ath_err(common, "qnum %u out of range, max %zu!\n",
989 qnum, ARRAY_SIZE(sc->tx.txq)); 992 axq_qnum, ARRAY_SIZE(sc->tx.txq));
990 ath9k_hw_releasetxqueue(ah, qnum); 993 ath9k_hw_releasetxqueue(ah, axq_qnum);
991 return NULL; 994 return NULL;
992 } 995 }
993 if (!ATH_TXQ_SETUP(sc, qnum)) { 996 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
994 struct ath_txq *txq = &sc->tx.txq[qnum]; 997 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
995 998
996 txq->axq_qnum = qnum; 999 txq->axq_qnum = axq_qnum;
1000 txq->mac80211_qnum = -1;
997 txq->axq_link = NULL; 1001 txq->axq_link = NULL;
998 INIT_LIST_HEAD(&txq->axq_q); 1002 INIT_LIST_HEAD(&txq->axq_q);
999 INIT_LIST_HEAD(&txq->axq_acq); 1003 INIT_LIST_HEAD(&txq->axq_acq);
@@ -1001,14 +1005,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1001 txq->axq_depth = 0; 1005 txq->axq_depth = 0;
1002 txq->axq_ampdu_depth = 0; 1006 txq->axq_ampdu_depth = 0;
1003 txq->axq_tx_inprogress = false; 1007 txq->axq_tx_inprogress = false;
1004 sc->tx.txqsetup |= 1<<qnum; 1008 sc->tx.txqsetup |= 1<<axq_qnum;
1005 1009
1006 txq->txq_headidx = txq->txq_tailidx = 0; 1010 txq->txq_headidx = txq->txq_tailidx = 0;
1007 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) 1011 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
1008 INIT_LIST_HEAD(&txq->txq_fifo[i]); 1012 INIT_LIST_HEAD(&txq->txq_fifo[i]);
1009 INIT_LIST_HEAD(&txq->txq_fifo_pending); 1013 INIT_LIST_HEAD(&txq->txq_fifo_pending);
1010 } 1014 }
1011 return &sc->tx.txq[qnum]; 1015 return &sc->tx.txq[axq_qnum];
1012} 1016}
1013 1017
1014int ath_txq_update(struct ath_softc *sc, int qnum, 1018int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -1051,6 +1055,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
1051int ath_cabq_update(struct ath_softc *sc) 1055int ath_cabq_update(struct ath_softc *sc)
1052{ 1056{
1053 struct ath9k_tx_queue_info qi; 1057 struct ath9k_tx_queue_info qi;
1058 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
1054 int qnum = sc->beacon.cabq->axq_qnum; 1059 int qnum = sc->beacon.cabq->axq_qnum;
1055 1060
1056 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 1061 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1062,7 +1067,7 @@ int ath_cabq_update(struct ath_softc *sc)
1062 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) 1067 else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1063 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; 1068 sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1064 1069
1065 qi.tqi_readyTime = (sc->beacon_interval * 1070 qi.tqi_readyTime = (cur_conf->beacon_interval *
1066 sc->config.cabqReadytime) / 100; 1071 sc->config.cabqReadytime) / 100;
1067 ath_txq_update(sc, qnum, &qi); 1072 ath_txq_update(sc, qnum, &qi);
1068 1073
@@ -1205,8 +1210,17 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1205 ath_err(common, "Failed to stop TX DMA!\n"); 1210 ath_err(common, "Failed to stop TX DMA!\n");
1206 1211
1207 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1212 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1208 if (ATH_TXQ_SETUP(sc, i)) 1213 if (!ATH_TXQ_SETUP(sc, i))
1209 ath_draintxq(sc, &sc->tx.txq[i], retry_tx); 1214 continue;
1215
1216 /*
1217 * The caller will resume queues with ieee80211_wake_queues.
1218 * Mark the queue as not stopped to prevent ath_tx_complete
1219 * from waking the queue too early.
1220 */
1221 txq = &sc->tx.txq[i];
1222 txq->stopped = false;
1223 ath_draintxq(sc, txq, retry_tx);
1210 } 1224 }
1211 1225
1212 return !npend; 1226 return !npend;
@@ -1218,46 +1232,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
1218 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); 1232 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
1219} 1233}
1220 1234
1235/* For each axq_acq entry, for each tid, try to schedule packets
1236 * for transmit until ampdu_depth has reached min Q depth.
1237 */
1221void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 1238void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1222{ 1239{
1223 struct ath_atx_ac *ac; 1240 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
1224 struct ath_atx_tid *tid; 1241 struct ath_atx_tid *tid, *last_tid;
1225 1242
1226 if (list_empty(&txq->axq_acq)) 1243 if (list_empty(&txq->axq_acq) ||
1244 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1227 return; 1245 return;
1228 1246
1229 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); 1247 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
1230 list_del(&ac->list); 1248 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
1231 ac->sched = false;
1232 1249
1233 do { 1250 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1234 if (list_empty(&ac->tid_q)) 1251 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
1235 return; 1252 list_del(&ac->list);
1253 ac->sched = false;
1236 1254
1237 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); 1255 while (!list_empty(&ac->tid_q)) {
1238 list_del(&tid->list); 1256 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
1239 tid->sched = false; 1257 list);
1258 list_del(&tid->list);
1259 tid->sched = false;
1240 1260
1241 if (tid->paused) 1261 if (tid->paused)
1242 continue; 1262 continue;
1243 1263
1244 ath_tx_sched_aggr(sc, txq, tid); 1264 ath_tx_sched_aggr(sc, txq, tid);
1245 1265
1246 /* 1266 /*
1247 * add tid to round-robin queue if more frames 1267 * add tid to round-robin queue if more frames
1248 * are pending for the tid 1268 * are pending for the tid
1249 */ 1269 */
1250 if (!list_empty(&tid->buf_q)) 1270 if (!list_empty(&tid->buf_q))
1251 ath_tx_queue_tid(txq, tid); 1271 ath_tx_queue_tid(txq, tid);
1252 1272
1253 break; 1273 if (tid == last_tid ||
1254 } while (!list_empty(&ac->tid_q)); 1274 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1275 break;
1276 }
1255 1277
1256 if (!list_empty(&ac->tid_q)) { 1278 if (!list_empty(&ac->tid_q)) {
1257 if (!ac->sched) { 1279 if (!ac->sched) {
1258 ac->sched = true; 1280 ac->sched = true;
1259 list_add_tail(&ac->list, &txq->axq_acq); 1281 list_add_tail(&ac->list, &txq->axq_acq);
1282 }
1260 } 1283 }
1284
1285 if (ac == last_ac ||
1286 txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
1287 return;
1261 } 1288 }
1262} 1289}
1263 1290
@@ -1301,6 +1328,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1301 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]); 1328 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1302 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]); 1329 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1303 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1330 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1331 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1304 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1332 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1305 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", 1333 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1306 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1334 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
@@ -1308,6 +1336,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1308 list_splice_tail_init(head, &txq->axq_q); 1336 list_splice_tail_init(head, &txq->axq_q);
1309 1337
1310 if (txq->axq_link == NULL) { 1338 if (txq->axq_link == NULL) {
1339 TX_STAT_INC(txq->axq_qnum, puttxbuf);
1311 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1340 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1312 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", 1341 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1313 txq->axq_qnum, ito64(bf->bf_daddr), 1342 txq->axq_qnum, ito64(bf->bf_daddr),
@@ -1321,6 +1350,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1321 } 1350 }
1322 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, 1351 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1323 &txq->axq_link); 1352 &txq->axq_link);
1353 TX_STAT_INC(txq->axq_qnum, txstart);
1324 ath9k_hw_txstart(ah, txq->axq_qnum); 1354 ath9k_hw_txstart(ah, txq->axq_qnum);
1325 } 1355 }
1326 txq->axq_depth++; 1356 txq->axq_depth++;
@@ -1335,7 +1365,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1335 struct list_head bf_head; 1365 struct list_head bf_head;
1336 1366
1337 bf->bf_state.bf_type |= BUF_AMPDU; 1367 bf->bf_state.bf_type |= BUF_AMPDU;
1338 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
1339 1368
1340 /* 1369 /*
1341 * Do not queue to h/w when any of the following conditions is true: 1370 * Do not queue to h/w when any of the following conditions is true:
@@ -1351,6 +1380,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1351 * Add this frame to software queue for scheduling later 1380 * Add this frame to software queue for scheduling later
1352 * for aggregation. 1381 * for aggregation.
1353 */ 1382 */
1383 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
1354 list_add_tail(&bf->list, &tid->buf_q); 1384 list_add_tail(&bf->list, &tid->buf_q);
1355 ath_tx_queue_tid(txctl->txq, tid); 1385 ath_tx_queue_tid(txctl->txq, tid);
1356 return; 1386 return;
@@ -1364,6 +1394,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1364 ath_tx_addto_baw(sc, tid, fi->seqno); 1394 ath_tx_addto_baw(sc, tid, fi->seqno);
1365 1395
1366 /* Queue to h/w without aggregation */ 1396 /* Queue to h/w without aggregation */
1397 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
1367 bf->bf_lastbf = bf; 1398 bf->bf_lastbf = bf;
1368 ath_buf_set_rate(sc, bf, fi->framelen); 1399 ath_buf_set_rate(sc, bf, fi->framelen);
1369 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); 1400 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
@@ -1416,8 +1447,7 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1416static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, 1447static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1417 int framelen) 1448 int framelen)
1418{ 1449{
1419 struct ath_wiphy *aphy = hw->priv; 1450 struct ath_softc *sc = hw->priv;
1420 struct ath_softc *sc = aphy->sc;
1421 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1451 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1422 struct ieee80211_sta *sta = tx_info->control.sta; 1452 struct ieee80211_sta *sta = tx_info->control.sta;
1423 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1453 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
@@ -1635,8 +1665,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1635 struct ath_txq *txq, 1665 struct ath_txq *txq,
1636 struct sk_buff *skb) 1666 struct sk_buff *skb)
1637{ 1667{
1638 struct ath_wiphy *aphy = hw->priv; 1668 struct ath_softc *sc = hw->priv;
1639 struct ath_softc *sc = aphy->sc;
1640 struct ath_hw *ah = sc->sc_ah; 1669 struct ath_hw *ah = sc->sc_ah;
1641 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1670 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1642 struct ath_frame_info *fi = get_frame_info(skb); 1671 struct ath_frame_info *fi = get_frame_info(skb);
@@ -1652,7 +1681,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1652 1681
1653 ATH_TXBUF_RESET(bf); 1682 ATH_TXBUF_RESET(bf);
1654 1683
1655 bf->aphy = aphy;
1656 bf->bf_flags = setup_tx_flags(skb); 1684 bf->bf_flags = setup_tx_flags(skb);
1657 bf->bf_mpdu = skb; 1685 bf->bf_mpdu = skb;
1658 1686
@@ -1741,8 +1769,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1741 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1769 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1742 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1770 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1743 struct ieee80211_sta *sta = info->control.sta; 1771 struct ieee80211_sta *sta = info->control.sta;
1744 struct ath_wiphy *aphy = hw->priv; 1772 struct ath_softc *sc = hw->priv;
1745 struct ath_softc *sc = aphy->sc;
1746 struct ath_txq *txq = txctl->txq; 1773 struct ath_txq *txq = txctl->txq;
1747 struct ath_buf *bf; 1774 struct ath_buf *bf;
1748 int padpos, padsize; 1775 int padpos, padsize;
@@ -1794,7 +1821,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1794 spin_lock_bh(&txq->axq_lock); 1821 spin_lock_bh(&txq->axq_lock);
1795 if (txq == sc->tx.txq_map[q] && 1822 if (txq == sc->tx.txq_map[q] &&
1796 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) { 1823 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1797 ath_mac80211_stop_queue(sc, q); 1824 ieee80211_stop_queue(sc->hw, q);
1798 txq->stopped = 1; 1825 txq->stopped = 1;
1799 } 1826 }
1800 spin_unlock_bh(&txq->axq_lock); 1827 spin_unlock_bh(&txq->axq_lock);
@@ -1809,8 +1836,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1809/*****************/ 1836/*****************/
1810 1837
1811static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1838static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1812 struct ath_wiphy *aphy, int tx_flags, int ftype, 1839 int tx_flags, int ftype, struct ath_txq *txq)
1813 struct ath_txq *txq)
1814{ 1840{
1815 struct ieee80211_hw *hw = sc->hw; 1841 struct ieee80211_hw *hw = sc->hw;
1816 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1842 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1820,9 +1846,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1820 1846
1821 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1847 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1822 1848
1823 if (aphy)
1824 hw = aphy->hw;
1825
1826 if (tx_flags & ATH_TX_BAR) 1849 if (tx_flags & ATH_TX_BAR)
1827 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1850 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1828 1851
@@ -1852,19 +1875,20 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1852 PS_WAIT_FOR_TX_ACK)); 1875 PS_WAIT_FOR_TX_ACK));
1853 } 1876 }
1854 1877
1855 if (unlikely(ftype)) 1878 q = skb_get_queue_mapping(skb);
1856 ath9k_tx_status(hw, skb, ftype); 1879 if (txq == sc->tx.txq_map[q]) {
1857 else { 1880 spin_lock_bh(&txq->axq_lock);
1858 q = skb_get_queue_mapping(skb); 1881 if (WARN_ON(--txq->pending_frames < 0))
1859 if (txq == sc->tx.txq_map[q]) { 1882 txq->pending_frames = 0;
1860 spin_lock_bh(&txq->axq_lock);
1861 if (WARN_ON(--txq->pending_frames < 0))
1862 txq->pending_frames = 0;
1863 spin_unlock_bh(&txq->axq_lock);
1864 }
1865 1883
1866 ieee80211_tx_status(hw, skb); 1884 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1885 ieee80211_wake_queue(sc->hw, q);
1886 txq->stopped = 0;
1887 }
1888 spin_unlock_bh(&txq->axq_lock);
1867 } 1889 }
1890
1891 ieee80211_tx_status(hw, skb);
1868} 1892}
1869 1893
1870static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1894static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1896,8 +1920,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1896 else 1920 else
1897 complete(&sc->paprd_complete); 1921 complete(&sc->paprd_complete);
1898 } else { 1922 } else {
1899 ath_debug_stat_tx(sc, bf, ts); 1923 ath_debug_stat_tx(sc, bf, ts, txq);
1900 ath_tx_complete(sc, skb, bf->aphy, tx_flags, 1924 ath_tx_complete(sc, skb, tx_flags,
1901 bf->bf_state.bfs_ftype, txq); 1925 bf->bf_state.bfs_ftype, txq);
1902 } 1926 }
1903 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 1927 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -1913,14 +1937,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1913 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 1937 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1914} 1938}
1915 1939
1916static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 1940static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1917 int nframes, int nbad, int txok, bool update_rc) 1941 struct ath_tx_status *ts, int nframes, int nbad,
1942 int txok, bool update_rc)
1918{ 1943{
1919 struct sk_buff *skb = bf->bf_mpdu; 1944 struct sk_buff *skb = bf->bf_mpdu;
1920 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1945 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1921 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1946 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1922 struct ieee80211_hw *hw = bf->aphy->hw; 1947 struct ieee80211_hw *hw = sc->hw;
1923 struct ath_softc *sc = bf->aphy->sc;
1924 struct ath_hw *ah = sc->sc_ah; 1948 struct ath_hw *ah = sc->sc_ah;
1925 u8 i, tx_rateindex; 1949 u8 i, tx_rateindex;
1926 1950
@@ -1971,19 +1995,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1971 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 1995 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
1972} 1996}
1973 1997
1974static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
1975{
1976 struct ath_txq *txq;
1977
1978 txq = sc->tx.txq_map[qnum];
1979 spin_lock_bh(&txq->axq_lock);
1980 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1981 if (ath_mac80211_start_queue(sc, qnum))
1982 txq->stopped = 0;
1983 }
1984 spin_unlock_bh(&txq->axq_lock);
1985}
1986
1987static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 1998static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1988{ 1999{
1989 struct ath_hw *ah = sc->sc_ah; 2000 struct ath_hw *ah = sc->sc_ah;
@@ -1994,7 +2005,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1994 struct ath_tx_status ts; 2005 struct ath_tx_status ts;
1995 int txok; 2006 int txok;
1996 int status; 2007 int status;
1997 int qnum;
1998 2008
1999 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 2009 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2000 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 2010 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2004,6 +2014,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2004 spin_lock_bh(&txq->axq_lock); 2014 spin_lock_bh(&txq->axq_lock);
2005 if (list_empty(&txq->axq_q)) { 2015 if (list_empty(&txq->axq_q)) {
2006 txq->axq_link = NULL; 2016 txq->axq_link = NULL;
2017 if (sc->sc_flags & SC_OP_TXAGGR &&
2018 !txq->txq_flush_inprogress)
2019 ath_txq_schedule(sc, txq);
2007 spin_unlock_bh(&txq->axq_lock); 2020 spin_unlock_bh(&txq->axq_lock);
2008 break; 2021 break;
2009 } 2022 }
@@ -2038,6 +2051,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2038 spin_unlock_bh(&txq->axq_lock); 2051 spin_unlock_bh(&txq->axq_lock);
2039 break; 2052 break;
2040 } 2053 }
2054 TX_STAT_INC(txq->axq_qnum, txprocdesc);
2041 2055
2042 /* 2056 /*
2043 * Remove ath_buf's of the same transmit unit from txq, 2057 * Remove ath_buf's of the same transmit unit from txq,
@@ -2058,6 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2058 2072
2059 if (bf_is_ampdu_not_probing(bf)) 2073 if (bf_is_ampdu_not_probing(bf))
2060 txq->axq_ampdu_depth--; 2074 txq->axq_ampdu_depth--;
2075
2061 spin_unlock_bh(&txq->axq_lock); 2076 spin_unlock_bh(&txq->axq_lock);
2062 2077
2063 if (bf_held) 2078 if (bf_held)
@@ -2070,27 +2085,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2070 */ 2085 */
2071 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2086 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2072 bf->bf_state.bf_type |= BUF_XRETRY; 2087 bf->bf_state.bf_type |= BUF_XRETRY;
2073 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true); 2088 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
2074 } 2089 }
2075 2090
2076 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2077
2078 if (bf_isampdu(bf)) 2091 if (bf_isampdu(bf))
2079 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok, 2092 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2080 true); 2093 true);
2081 else 2094 else
2082 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); 2095 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2083 2096
2084 if (txq == sc->tx.txq_map[qnum])
2085 ath_wake_mac80211_queue(sc, qnum);
2086
2087 spin_lock_bh(&txq->axq_lock); 2097 spin_lock_bh(&txq->axq_lock);
2088 if (sc->sc_flags & SC_OP_TXAGGR) 2098
2099 if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress)
2089 ath_txq_schedule(sc, txq); 2100 ath_txq_schedule(sc, txq);
2090 spin_unlock_bh(&txq->axq_lock); 2101 spin_unlock_bh(&txq->axq_lock);
2091 } 2102 }
2092} 2103}
2093 2104
2105static void ath_hw_pll_work(struct work_struct *work)
2106{
2107 struct ath_softc *sc = container_of(work, struct ath_softc,
2108 hw_pll_work.work);
2109 static int count;
2110
2111 if (AR_SREV_9485(sc->sc_ah)) {
2112 if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
2113 count++;
2114
2115 if (count == 3) {
2116 /* Rx is hung for more than 500ms. Reset it */
2117 ath_reset(sc, true);
2118 count = 0;
2119 }
2120 } else
2121 count = 0;
2122
2123 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
2124 }
2125}
2126
2094static void ath_tx_complete_poll_work(struct work_struct *work) 2127static void ath_tx_complete_poll_work(struct work_struct *work)
2095{ 2128{
2096 struct ath_softc *sc = container_of(work, struct ath_softc, 2129 struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2098,6 +2131,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2098 struct ath_txq *txq; 2131 struct ath_txq *txq;
2099 int i; 2132 int i;
2100 bool needreset = false; 2133 bool needreset = false;
2134#ifdef CONFIG_ATH9K_DEBUGFS
2135 sc->tx_complete_poll_work_seen++;
2136#endif
2101 2137
2102 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 2138 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2103 if (ATH_TXQ_SETUP(sc, i)) { 2139 if (ATH_TXQ_SETUP(sc, i)) {
@@ -2111,6 +2147,33 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2111 } else { 2147 } else {
2112 txq->axq_tx_inprogress = true; 2148 txq->axq_tx_inprogress = true;
2113 } 2149 }
2150 } else {
2151 /* If the queue has pending buffers, then it
2152 * should be doing tx work (and have axq_depth).
 2153			 * We should not get to this state, but in
 2154			 * practice we do.
2155 */
2156 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
2157 (txq->pending_frames > 0 ||
2158 !list_empty(&txq->axq_acq) ||
2159 txq->stopped)) {
2160 ath_err(ath9k_hw_common(sc->sc_ah),
2161 "txq: %p axq_qnum: %u,"
2162 " mac80211_qnum: %i"
2163 " axq_link: %p"
2164 " pending frames: %i"
2165 " axq_acq empty: %i"
2166 " stopped: %i"
2167 " axq_depth: 0 Attempting to"
2168 " restart tx logic.\n",
2169 txq, txq->axq_qnum,
2170 txq->mac80211_qnum,
2171 txq->axq_link,
2172 txq->pending_frames,
2173 list_empty(&txq->axq_acq),
2174 txq->stopped);
2175 ath_txq_schedule(sc, txq);
2176 }
2114 } 2177 }
2115 spin_unlock_bh(&txq->axq_lock); 2178 spin_unlock_bh(&txq->axq_lock);
2116 } 2179 }
@@ -2150,7 +2213,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2150 struct list_head bf_head; 2213 struct list_head bf_head;
2151 int status; 2214 int status;
2152 int txok; 2215 int txok;
2153 int qnum;
2154 2216
2155 for (;;) { 2217 for (;;) {
2156 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs); 2218 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2193,11 +2255,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2193 if (!bf_isampdu(bf)) { 2255 if (!bf_isampdu(bf)) {
2194 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2256 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2195 bf->bf_state.bf_type |= BUF_XRETRY; 2257 bf->bf_state.bf_type |= BUF_XRETRY;
2196 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true); 2258 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
2197 } 2259 }
2198 2260
2199 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2200
2201 if (bf_isampdu(bf)) 2261 if (bf_isampdu(bf))
2202 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, 2262 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2203 txok, true); 2263 txok, true);
@@ -2205,19 +2265,20 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2205 ath_tx_complete_buf(sc, bf, txq, &bf_head, 2265 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2206 &txs, txok, 0); 2266 &txs, txok, 0);
2207 2267
2208 if (txq == sc->tx.txq_map[qnum])
2209 ath_wake_mac80211_queue(sc, qnum);
2210
2211 spin_lock_bh(&txq->axq_lock); 2268 spin_lock_bh(&txq->axq_lock);
2212 if (!list_empty(&txq->txq_fifo_pending)) { 2269
2213 INIT_LIST_HEAD(&bf_head); 2270 if (!txq->txq_flush_inprogress) {
2214 bf = list_first_entry(&txq->txq_fifo_pending, 2271 if (!list_empty(&txq->txq_fifo_pending)) {
2215 struct ath_buf, list); 2272 INIT_LIST_HEAD(&bf_head);
2216 list_cut_position(&bf_head, &txq->txq_fifo_pending, 2273 bf = list_first_entry(&txq->txq_fifo_pending,
2217 &bf->bf_lastbf->list); 2274 struct ath_buf, list);
2218 ath_tx_txqaddbuf(sc, txq, &bf_head); 2275 list_cut_position(&bf_head,
2219 } else if (sc->sc_flags & SC_OP_TXAGGR) 2276 &txq->txq_fifo_pending,
2220 ath_txq_schedule(sc, txq); 2277 &bf->bf_lastbf->list);
2278 ath_tx_txqaddbuf(sc, txq, &bf_head);
2279 } else if (sc->sc_flags & SC_OP_TXAGGR)
2280 ath_txq_schedule(sc, txq);
2281 }
2221 spin_unlock_bh(&txq->axq_lock); 2282 spin_unlock_bh(&txq->axq_lock);
2222 } 2283 }
2223} 2284}
@@ -2285,6 +2346,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2285 } 2346 }
2286 2347
2287 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); 2348 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2349 INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
2288 2350
2289 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 2351 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2290 error = ath_tx_edma_init(sc); 2352 error = ath_tx_edma_init(sc);
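
One net effect of the xmit.c changes above is that queue flow control now works on pending_frames directly against ATH_MAX_QDEPTH: ath_tx_start() stops the mac80211 queue once the count exceeds the threshold, and ath_tx_complete() wakes it again once the count drops back below. The following is a minimal stand-alone sketch of that counter-based back-pressure; the struct, constant and stop/wake callbacks are placeholders, not the driver's API.

/*
 * Minimal sketch of the pending_frames back-pressure now done directly in
 * xmit.c (hypothetical constants and callbacks, not the driver's API).
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_QDEPTH 8

struct toy_txq {
	int pending_frames;
	bool stopped;
};

static void stop_queue(void) { printf("mac80211 queue stopped\n"); }
static void wake_queue(void) { printf("mac80211 queue woken\n"); }

/* mirrors the ath_tx_start() path */
static void tx_enqueue(struct toy_txq *q)
{
	if (++q->pending_frames > MAX_QDEPTH && !q->stopped) {
		stop_queue();
		q->stopped = true;
	}
}

/* mirrors the ath_tx_complete() path */
static void tx_complete(struct toy_txq *q)
{
	if (--q->pending_frames < 0)
		q->pending_frames = 0;
	if (q->stopped && q->pending_frames < MAX_QDEPTH) {
		wake_queue();
		q->stopped = false;
	}
}

int main(void)
{
	struct toy_txq q = { 0, false };

	for (int i = 0; i < 12; i++)
		tx_enqueue(&q);		/* stops once depth exceeds 8 */
	for (int i = 0; i < 12; i++)
		tx_complete(&q);	/* wakes once depth drops below 8 */
	return 0;
}
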
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index d07ff7f2fd9..420d437f958 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -283,6 +283,7 @@ struct ar9170 {
283 unsigned int mem_blocks; 283 unsigned int mem_blocks;
284 unsigned int mem_block_size; 284 unsigned int mem_block_size;
285 unsigned int rx_size; 285 unsigned int rx_size;
286 unsigned int tx_seq_table;
286 } fw; 287 } fw;
287 288
288 /* reset / stuck frames/queue detection */ 289 /* reset / stuck frames/queue detection */
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 546b4e4ec5e..9517ede9e2d 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
150 const struct carl9170fw_otus_desc *otus_desc; 150 const struct carl9170fw_otus_desc *otus_desc;
151 const struct carl9170fw_chk_desc *chk_desc; 151 const struct carl9170fw_chk_desc *chk_desc;
152 const struct carl9170fw_last_desc *last_desc; 152 const struct carl9170fw_last_desc *last_desc;
153 const struct carl9170fw_txsq_desc *txsq_desc;
153 154
154 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, 155 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
155 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); 156 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
264 FIF_PROMISC_IN_BSS; 265 FIF_PROMISC_IN_BSS;
265 } 266 }
266 267
268 if (SUPP(CARL9170FW_WOL))
269 device_set_wakeup_enable(&ar->udev->dev, true);
270
267 ar->fw.vif_num = otus_desc->vif_num; 271 ar->fw.vif_num = otus_desc->vif_num;
268 ar->fw.cmd_bufs = otus_desc->cmd_bufs; 272 ar->fw.cmd_bufs = otus_desc->cmd_bufs;
269 ar->fw.address = le32_to_cpu(otus_desc->fw_address); 273 ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -296,6 +300,17 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
296 } 300 }
297 } 301 }
298 302
303 txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
304 sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
305
306 if (txsq_desc) {
307 ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
308 if (!valid_cpu_addr(ar->fw.tx_seq_table))
309 return -EINVAL;
310 } else {
311 ar->fw.tx_seq_table = 0;
312 }
313
299#undef SUPPORTED 314#undef SUPPORTED
300 return 0; 315 return 0;
301} 316}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 3680dfc70f4..30449d21b76 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd {
167#define CARL9170_RX_FILTER_CTL_BACKR 0x20 167#define CARL9170_RX_FILTER_CTL_BACKR 0x20
168#define CARL9170_RX_FILTER_MGMT 0x40 168#define CARL9170_RX_FILTER_MGMT 0x40
169#define CARL9170_RX_FILTER_DATA 0x80 169#define CARL9170_RX_FILTER_DATA 0x80
170#define CARL9170_RX_FILTER_EVERYTHING (~0)
170 171
171struct carl9170_bcn_ctrl_cmd { 172struct carl9170_bcn_ctrl_cmd {
172 __le32 vif_id; 173 __le32 vif_id;
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 71f3821f605..921066822dd 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -69,6 +69,9 @@ enum carl9170fw_feature_list {
69 /* Firmware RX filter | CARL9170_CMD_RX_FILTER */ 69 /* Firmware RX filter | CARL9170_CMD_RX_FILTER */
70 CARL9170FW_RX_FILTER, 70 CARL9170FW_RX_FILTER,
71 71
72 /* Wake up on WLAN */
73 CARL9170FW_WOL,
74
72 /* KEEP LAST */ 75 /* KEEP LAST */
73 __CARL9170FW_FEATURE_NUM 76 __CARL9170FW_FEATURE_NUM
74}; 77};
@@ -78,6 +81,7 @@ enum carl9170fw_feature_list {
78#define FIX_MAGIC "FIX\0" 81#define FIX_MAGIC "FIX\0"
79#define DBG_MAGIC "DBG\0" 82#define DBG_MAGIC "DBG\0"
80#define CHK_MAGIC "CHK\0" 83#define CHK_MAGIC "CHK\0"
84#define TXSQ_MAGIC "TXSQ"
81#define LAST_MAGIC "LAST" 85#define LAST_MAGIC "LAST"
82 86
83#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31) 87#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -88,8 +92,10 @@ enum carl9170fw_feature_list {
88#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1) 92#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
89#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10) 93#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
90 94
95#define CARL9170FW_MAGIC_SIZE 4
96
91struct carl9170fw_desc_head { 97struct carl9170fw_desc_head {
92 u8 magic[4]; 98 u8 magic[CARL9170FW_MAGIC_SIZE];
93 __le16 length; 99 __le16 length;
94 u8 min_ver; 100 u8 min_ver;
95 u8 cur_ver; 101 u8 cur_ver;
@@ -170,6 +176,16 @@ struct carl9170fw_chk_desc {
170#define CARL9170FW_CHK_DESC_SIZE \ 176#define CARL9170FW_CHK_DESC_SIZE \
171 (sizeof(struct carl9170fw_chk_desc)) 177 (sizeof(struct carl9170fw_chk_desc))
172 178
179#define CARL9170FW_TXSQ_DESC_MIN_VER 1
180#define CARL9170FW_TXSQ_DESC_CUR_VER 1
181struct carl9170fw_txsq_desc {
182 struct carl9170fw_desc_head head;
183
184 __le32 seq_table_addr;
185} __packed;
186#define CARL9170FW_TXSQ_DESC_SIZE \
187 (sizeof(struct carl9170fw_txsq_desc))
188
173#define CARL9170FW_LAST_DESC_MIN_VER 1 189#define CARL9170FW_LAST_DESC_MIN_VER 1
174#define CARL9170FW_LAST_DESC_CUR_VER 2 190#define CARL9170FW_LAST_DESC_CUR_VER 2
175struct carl9170fw_last_desc { 191struct carl9170fw_last_desc {
@@ -189,8 +205,8 @@ struct carl9170fw_last_desc {
189 } 205 }
190 206
191static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, 207static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
192 u8 magic[4], __le16 length, 208 u8 magic[CARL9170FW_MAGIC_SIZE],
193 u8 min_ver, u8 cur_ver) 209 __le16 length, u8 min_ver, u8 cur_ver)
194{ 210{
195 head->magic[0] = magic[0]; 211 head->magic[0] = magic[0];
196 head->magic[1] = magic[1]; 212 head->magic[1] = magic[1];
@@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
204 220
205#define carl9170fw_for_each_hdr(desc, fw_desc) \ 221#define carl9170fw_for_each_hdr(desc, fw_desc) \
206 for (desc = fw_desc; \ 222 for (desc = fw_desc; \
207 memcmp(desc->magic, LAST_MAGIC, 4) && \ 223 memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \
208 le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \ 224 le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \
209 le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \ 225 le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \
210 desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length))) 226 desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
@@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature)
218} 234}
219 235
220static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head, 236static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
221 const u8 descid[4], u16 min_len, 237 const u8 descid[CARL9170FW_MAGIC_SIZE],
222 u8 compatible_revision) 238 u16 min_len, u8 compatible_revision)
223{ 239{
224 if (descid[0] == head->magic[0] && descid[1] == head->magic[1] && 240 if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
225 descid[2] == head->magic[2] && descid[3] == head->magic[3] && 241 descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
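
The fwdesc.h changes above keep the descriptor-chain model: each block starts with a 4-byte magic and a 16-bit length, and carl9170fw_for_each_hdr() walks blocks until it reaches LAST_MAGIC or an implausible length. Below is a self-contained sketch of that walk over a hand-built buffer; the layout and values are for illustration only, not a real firmware image.

/*
 * Stand-alone sketch of the magic/length descriptor walk used by
 * carl9170fw_for_each_hdr above.  The buffer is hand-built for the
 * example; it is not a real firmware image.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct desc_head {
	uint8_t  magic[4];
	uint16_t length;	/* header + payload */
	uint8_t  min_ver;
	uint8_t  cur_ver;
} __attribute__((packed));

int main(void)
{
	/* two descriptors ("OTUS", "TXSQ") followed by the "LAST" terminator */
	uint8_t blob[64];
	struct desc_head d;
	size_t off = 0;

	memcpy(d.magic, "OTUS", 4); d.length = sizeof(d) + 4; d.min_ver = d.cur_ver = 1;
	memcpy(blob + off, &d, sizeof(d)); off += d.length;
	memcpy(d.magic, "TXSQ", 4); d.length = sizeof(d) + 4; d.min_ver = d.cur_ver = 1;
	memcpy(blob + off, &d, sizeof(d)); off += d.length;
	memcpy(d.magic, "LAST", 4); d.length = sizeof(d);
	memcpy(blob + off, &d, sizeof(d));

	/* the walk: stop at "LAST" or on an implausible length */
	for (off = 0;;) {
		struct desc_head *h = (struct desc_head *)(blob + off);

		if (!memcmp(h->magic, "LAST", 4))
			break;
		if (h->length < sizeof(*h) || off + h->length > sizeof(blob))
			break;
		printf("descriptor %.4s, length %u\n",
		       (const char *)h->magic, (unsigned)h->length);
		off += h->length;
	}
	return 0;
}
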
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index e85df6edfed..4e30762dd90 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -463,6 +463,8 @@
463 463
464#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010) 464#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
465#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014) 465#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
466#define AR9170_PWR_PLL_ADDAC_DIV_S 2
467#define AR9170_PWR_PLL_ADDAC_DIV 0xffc
466#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020) 468#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
467 469
468/* Faraday USB Controller */ 470/* Faraday USB Controller */
@@ -471,6 +473,9 @@
471#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000) 473#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
472#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0) 474#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
473#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2) 475#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
476#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3)
477#define AR9170_USB_MAIN_CTRL_RESET BIT(4)
478#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5)
474#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6) 479#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
475 480
476#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001) 481#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
@@ -499,6 +504,13 @@
499#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020) 504#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
500 505
501#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021) 506#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
507#define AR9170_USB_INTR_SRC0_SETUP BIT(0)
508#define AR9170_USB_INTR_SRC0_IN BIT(1)
509#define AR9170_USB_INTR_SRC0_OUT BIT(2)
510#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */
511#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */
512#define AR9170_USB_INTR_SRC0_ABORT BIT(7)
513
502#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022) 514#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
503#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023) 515#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
504#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024) 516#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
@@ -506,6 +518,15 @@
506#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026) 518#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
507#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027) 519#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
508#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028) 520#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
521#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1)
522#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2)
523#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3)
524#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4)
525#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5)
526#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6)
527#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7)
528
529#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f)
509 530
510#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030) 531#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
511#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030) 532#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
@@ -581,6 +602,10 @@
581 602
582#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110) 603#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
583#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114) 604#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
605
606#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120)
607#define AR9170_USB_WAKE_UP_WAKE BIT(0)
608
584#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0) 609#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
585#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1)) 610#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
586 611
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 870df8c4262..ede3d7e5a04 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -662,6 +662,13 @@ init:
662 goto unlock; 662 goto unlock;
663 } 663 }
664 664
665 if (ar->fw.tx_seq_table) {
666 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
667 0);
668 if (err)
669 goto unlock;
670 }
671
665unlock: 672unlock:
666 if (err && (vif_id >= 0)) { 673 if (err && (vif_id >= 0)) {
667 vif_priv->active = false; 674 vif_priv->active = false;
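
The block added above clears one firmware register per virtual interface as it comes up: the TX sequence-counter table starts at ar->fw.tx_seq_table and each vif owns a 32-bit slot, hence the vif_id * 4 offset. A minimal standalone sketch of that addressing, with a made-up base address and without the driver's real carl9170_write_reg() accessor:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical base address of the firmware's TX sequence table. */
#define TX_SEQ_TABLE_BASE 0x1d4000u

/* Each vif owns one 32-bit counter, hence the vif_id * 4 stride. */
static uint32_t tx_seq_slot(uint32_t table_base, unsigned int vif_id)
{
	return table_base + vif_id * 4;
}

int main(void)
{
	for (unsigned int vif_id = 0; vif_id < 4; vif_id++)
		printf("vif %u -> register 0x%08x\n",
		       vif_id, tx_seq_slot(TX_SEQ_TABLE_BASE, vif_id));
	return 0;
}
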
@@ -1279,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1279 struct ieee80211_vif *vif, 1286 struct ieee80211_vif *vif,
1280 enum ieee80211_ampdu_mlme_action action, 1287 enum ieee80211_ampdu_mlme_action action,
1281 struct ieee80211_sta *sta, 1288 struct ieee80211_sta *sta,
1282 u16 tid, u16 *ssn) 1289 u16 tid, u16 *ssn, u8 buf_size)
1283{ 1290{
1284 struct ar9170 *ar = hw->priv; 1291 struct ar9170 *ar = hw->priv;
1285 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; 1292 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6cc58e052d1..6f41e21d3a1 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -862,6 +862,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
862 if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)) 862 if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
863 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB; 863 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
864 864
865 if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
866 txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
867
865 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) 868 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
866 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF; 869 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
867 870
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index ee0f84f2a2f..15095c03516 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
1#ifndef __CARL9170_SHARED_VERSION_H 1#ifndef __CARL9170_SHARED_VERSION_H
2#define __CARL9170_SHARED_VERSION_H 2#define __CARL9170_SHARED_VERSION_H
3#define CARL9170FW_VERSION_YEAR 10 3#define CARL9170FW_VERSION_YEAR 11
4#define CARL9170FW_VERSION_MONTH 10 4#define CARL9170FW_VERSION_MONTH 1
5#define CARL9170FW_VERSION_DAY 29 5#define CARL9170FW_VERSION_DAY 22
6#define CARL9170FW_VERSION_GIT "1.9.0" 6#define CARL9170FW_VERSION_GIT "1.9.2"
7#endif /* __CARL9170_SHARED_VERSION_H */ 7#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index 24d63b583b6..9e1324b67e0 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -251,7 +251,7 @@ struct carl9170_tx_superdesc {
251 u8 ampdu_commit_factor:1; 251 u8 ampdu_commit_factor:1;
252 u8 ampdu_unused_bit:1; 252 u8 ampdu_unused_bit:1;
253 u8 queue:2; 253 u8 queue:2;
254 u8 reserved:1; 254 u8 assign_seq:1;
255 u8 vif_id:3; 255 u8 vif_id:3;
256 u8 fill_in_tsf:1; 256 u8 fill_in_tsf:1;
257 u8 cab:1; 257 u8 cab:1;
@@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc {
299 299
300#define CARL9170_TX_SUPER_MISC_QUEUE 0x3 300#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
301#define CARL9170_TX_SUPER_MISC_QUEUE_S 0 301#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
302#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4
302#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38 303#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
303#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3 304#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
304#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40 305#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
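
The new CARL9170_TX_SUPER_MISC_ASSIGN_SEQ mask (0x4) selects bit 2 of the superdesc misc byte, which is where the assign_seq:1 field lands once the two queue bits below it are counted (queue:2 at bits 0-1, vif_id:3 at bits 3-5, fill_in_tsf at bit 6), assuming the LSB-first bitfield layout that the existing mask values already imply. A small sketch that rebuilds the byte with plain shifts instead of the driver's bitfield struct:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MISC_QUEUE       0x03   /* bits 0-1 */
#define MISC_ASSIGN_SEQ  0x04   /* bit 2    */
#define MISC_VIF_ID      0x38   /* bits 3-5 */
#define MISC_VIF_ID_S    3
#define MISC_FILL_IN_TSF 0x40   /* bit 6    */

int main(void)
{
	uint8_t misc = 0;

	misc |= 2 & MISC_QUEUE;                      /* queue 2                */
	misc |= MISC_ASSIGN_SEQ;                     /* firmware assigns seqno */
	misc |= (1 << MISC_VIF_ID_S) & MISC_VIF_ID;  /* vif_id 1               */

	assert(misc == 0x0e);
	printf("misc = 0x%02x\n", misc);
	return 0;
}
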
@@ -413,6 +414,23 @@ enum ar9170_txq {
413 __AR9170_NUM_TXQ, 414 __AR9170_NUM_TXQ,
414}; 415};
415 416
417/*
418 * This is a workaround for several undocumented bugs.
419 * Don't mess with the QoS/AC <-> HW Queue map, if you don't
420 * know what you are doing.
421 *
422 * Known problems [hardware]:
423 * * The MAC does not aggregate frames on anything other
424 * than the first HW queue.
425 * * when an AMPDU is placed [in the first hw queue] and
426 * additional frames are already queued on a different
427 * hw queue, the MAC will ALWAYS freeze.
428 *
429 * In a nutshell: The hardware can either do QoS or
430 * Aggregation but not both at the same time. As a
431 * result, this makes the device pretty much useless
432 * for any serious 802.11n setup.
433 */
416static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 }; 434static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
417 435
418#define AR9170_TXQ_DEPTH 32 436#define AR9170_TXQ_DEPTH 32
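
The map itself is indexed by the mac80211 queue number and returns the hardware queue, so with the usual mac80211 ordering (0 = voice, 1 = video, 2 = best effort, 3 = background, an assumption not spelled out in this header) best-effort traffic lands on HW queue 0, the only queue the comment above says can aggregate. A tiny lookup sketch:

#include <stdio.h>

static const unsigned char ar9170_qmap[4] = { 2, 1, 0, 3 };

int main(void)
{
	/* assumed mac80211 queue order: 0 = VO, 1 = VI, 2 = BE, 3 = BK */
	static const char *const ac[4] = { "VO", "VI", "BE", "BK" };

	for (int q = 0; q < 4; q++)
		printf("mac80211 queue %d (%s) -> HW queue %u\n",
		       q, ac[q], ar9170_qmap[q]);
	return 0;
}
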
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 5d465e5fcf2..37b8e115375 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -58,8 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
58 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); 58 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
59 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0); 59 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
60 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); 60 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
61 if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) 61 if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
62 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); 62 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
63 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
64 AR_KEYTABLE_TYPE_CLR);
65 }
63 66
64 } 67 }
65 68
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 2b14775e6bc..f828f294ba8 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
158 } 158 }
159} 159}
160 160
161bool ath_is_49ghz_allowed(u16 regdomain)
162{
163 /* possibly more */
164 return regdomain == MKK9_MKKC;
165}
166EXPORT_SYMBOL(ath_is_49ghz_allowed);
167
161/* Frequency is one where radar detection is required */ 168/* Frequency is one where radar detection is required */
162static bool ath_is_radar_freq(u16 center_freq) 169static bool ath_is_radar_freq(u16 center_freq)
163{ 170{
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 345dd9721b4..172f63f671c 100644

--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -250,6 +250,7 @@ enum CountryCode {
250}; 250};
251 251
252bool ath_is_world_regd(struct ath_regulatory *reg); 252bool ath_is_world_regd(struct ath_regulatory *reg);
253bool ath_is_49ghz_allowed(u16 regdomain);
253int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, 254int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
254 int (*reg_notifier)(struct wiphy *wiphy, 255 int (*reg_notifier)(struct wiphy *wiphy,
255 struct regulatory_request *request)); 256 struct regulatory_request *request));
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 61915f37141..da60faee74f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1397,7 +1397,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
1397} 1397}
1398 1398
1399/* 1399/*
1400 * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it 1400 * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
1401 * 1401 *
1402 * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent. 1402 * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
1403 * 1403 *
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ed424574160..e1e3b1cf3cf 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -2,6 +2,10 @@ config IWLWIFI
2 tristate "Intel Wireless Wifi" 2 tristate "Intel Wireless Wifi"
3 depends on PCI && MAC80211 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 select NEW_LEDS
6 select LEDS_CLASS
7 select LEDS_TRIGGERS
8 select MAC80211_LEDS
5 9
6menu "Debugging Options" 10menu "Debugging Options"
7 depends on IWLWIFI 11 depends on IWLWIFI
@@ -106,9 +110,27 @@ config IWL5000
106 Intel WiFi Link 1000BGN 110 Intel WiFi Link 1000BGN
107 Intel Wireless WiFi 5150AGN 111 Intel Wireless WiFi 5150AGN
108 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN 112 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
109 Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B) 113 Intel 6005 Series Wi-Fi Adapters
110 Intel WIreless WiFi Link 6050BGN Gen 2 Adapter 114 Intel 6030 Series Wi-Fi Adapters
115 Intel Wireless WiFi Link 6150BGN 2 Adapter
111 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) 116 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
117 Intel 2000 Series Wi-Fi Adapters
118
119config IWL_P2P
120 bool "iwlwifi experimental P2P support"
121 depends on IWL5000
122 help
123 This option enables experimental P2P support for some devices
124 based on microcode support. Since P2P support is still under
125 development, this option may even enable it now for some
126 devices that will later turn out not to support it due to
127 microcode restrictions.
128
129 To determine if your microcode supports the experimental P2P
130 offered by this option, check if the driver advertises AP
131 support when it is loaded.
132
133 Say Y only if you want to experiment with P2P.
112 134
113config IWL3945 135config IWL3945
114 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 136 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 93380f97835..25be742c69c 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -26,6 +26,7 @@ iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
26iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 26iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
27iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 27iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
28iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 28iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
29iwlagn-$(CONFIG_IWL5000) += iwl-2000.o
29 30
30# 3945 31# 3945
31obj-$(CONFIG_IWL3945) += iwl3945.o 32obj-$(CONFIG_IWL3945) += iwl3945.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644
index 00000000000..30483e27ce5
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -0,0 +1,561 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-sta.h"
45#include "iwl-agn.h"
46#include "iwl-helpers.h"
47#include "iwl-agn-hw.h"
48#include "iwl-6000-hw.h"
49#include "iwl-agn-led.h"
50#include "iwl-agn-debugfs.h"
51
52/* Highest firmware API version supported */
53#define IWL2030_UCODE_API_MAX 5
54#define IWL2000_UCODE_API_MAX 5
55#define IWL200_UCODE_API_MAX 5
56
57/* Lowest firmware API version supported */
58#define IWL2030_UCODE_API_MIN 5
59#define IWL2000_UCODE_API_MIN 5
60#define IWL200_UCODE_API_MIN 5
61
62#define IWL2030_FW_PRE "iwlwifi-2030-"
63#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
64#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
65
66#define IWL2000_FW_PRE "iwlwifi-2000-"
67#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
68#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
69
70#define IWL200_FW_PRE "iwlwifi-200-"
71#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode"
72#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
73
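
The two-level macro indirection above is the usual stringification trick: the outer macro lets the API number expand first, the inner one turns it into a string, and adjacent string literals are then concatenated, so IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX) becomes "iwlwifi-2030-5.ucode". A standalone sketch of the same pattern (names shortened, not the driver's own macros):

#include <stdio.h>

#define FW_PRE "iwlwifi-2030-"
#define UCODE_API_MAX 5

/* Inner macro sees the already-expanded number and stringifies it;
 * adjacent string literals are pasted together by the compiler. */
#define _MODULE_FIRMWARE(api) FW_PRE #api ".ucode"
#define MODULE_FIRMWARE_NAME(api) _MODULE_FIRMWARE(api)

int main(void)
{
	puts(MODULE_FIRMWARE_NAME(UCODE_API_MAX)); /* iwlwifi-2030-5.ucode */
	return 0;
}
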
74static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
75{
76 /* want Celsius */
77 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
78 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
79}
80
81/* NIC configuration for 2000 series */
82static void iwl2000_nic_config(struct iwl_priv *priv)
83{
84 u16 radio_cfg;
85
86 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
87
88 /* write radio config values to register */
89 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
90 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
91 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
92 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
93 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
94
95 /* set CSR_HW_CONFIG_REG for uCode use */
96 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
97 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
98 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
99
100 if (priv->cfg->iq_invert)
101 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
102 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
103
104}
105
106static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
107 .min_nrg_cck = 97,
108 .max_nrg_cck = 0, /* not used, set to 0 */
109 .auto_corr_min_ofdm = 80,
110 .auto_corr_min_ofdm_mrc = 128,
111 .auto_corr_min_ofdm_x1 = 105,
112 .auto_corr_min_ofdm_mrc_x1 = 192,
113
114 .auto_corr_max_ofdm = 145,
115 .auto_corr_max_ofdm_mrc = 232,
116 .auto_corr_max_ofdm_x1 = 110,
117 .auto_corr_max_ofdm_mrc_x1 = 232,
118
119 .auto_corr_min_cck = 125,
120 .auto_corr_max_cck = 175,
121 .auto_corr_min_cck_mrc = 160,
122 .auto_corr_max_cck_mrc = 310,
123 .nrg_th_cck = 97,
124 .nrg_th_ofdm = 100,
125
126 .barker_corr_th_min = 190,
127 .barker_corr_th_min_mrc = 390,
128 .nrg_th_cca = 62,
129};
130
131static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
132{
133 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
134 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
135 priv->cfg->base_params->num_of_queues =
136 priv->cfg->mod_params->num_of_queues;
137
138 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
139 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
140 priv->hw_params.scd_bc_tbls_size =
141 priv->cfg->base_params->num_of_queues *
142 sizeof(struct iwlagn_scd_bc_tbl);
143 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
144 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
145 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
146
147 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
148 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
149
150 priv->hw_params.max_bsm_size = 0;
151 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
152 BIT(IEEE80211_BAND_5GHZ);
153 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
154
155 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
156 if (priv->cfg->rx_with_siso_diversity)
157 priv->hw_params.rx_chains_num = 1;
158 else
159 priv->hw_params.rx_chains_num =
160 num_of_ant(priv->cfg->valid_rx_ant);
161 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
162 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
163
164 iwl2000_set_ct_threshold(priv);
165
166 /* Set initial sensitivity parameters */
167 /* Set initial calibration set */
168 priv->hw_params.sens = &iwl2000_sensitivity;
169 priv->hw_params.calib_init_cfg =
170 BIT(IWL_CALIB_XTAL) |
171 BIT(IWL_CALIB_LO) |
172 BIT(IWL_CALIB_TX_IQ) |
173 BIT(IWL_CALIB_BASE_BAND);
174 if (priv->cfg->need_dc_calib)
175 priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
176 if (priv->cfg->need_temp_offset_calib)
177 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
178
179 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
180
181 return 0;
182}
183
184static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
185 struct ieee80211_channel_switch *ch_switch)
186{
187 /*
188 * MULTI-FIXME
189 * See iwl_mac_channel_switch.
190 */
191 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
192 struct iwl6000_channel_switch_cmd cmd;
193 const struct iwl_channel_info *ch_info;
194 u32 switch_time_in_usec, ucode_switch_time;
195 u16 ch;
196 u32 tsf_low;
197 u8 switch_count;
198 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
199 struct ieee80211_vif *vif = ctx->vif;
200 struct iwl_host_cmd hcmd = {
201 .id = REPLY_CHANNEL_SWITCH,
202 .len = sizeof(cmd),
203 .flags = CMD_SYNC,
204 .data = &cmd,
205 };
206
207 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
208 ch = ch_switch->channel->hw_value;
209 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
210 ctx->active.channel, ch);
211 cmd.channel = cpu_to_le16(ch);
212 cmd.rxon_flags = ctx->staging.flags;
213 cmd.rxon_filter_flags = ctx->staging.filter_flags;
214 switch_count = ch_switch->count;
215 tsf_low = ch_switch->timestamp & 0x0ffffffff;
216 /*
217 * calculate the ucode channel switch time
218 * adding TSF as one of the factors for when to switch
219 */
220 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
221 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
222 beacon_interval)) {
223 switch_count -= (priv->ucode_beacon_time -
224 tsf_low) / beacon_interval;
225 } else
226 switch_count = 0;
227 }
228 if (switch_count <= 1)
229 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
230 else {
231 switch_time_in_usec =
232 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
233 ucode_switch_time = iwl_usecs_to_beacons(priv,
234 switch_time_in_usec,
235 beacon_interval);
236 cmd.switch_time = iwl_add_beacon_time(priv,
237 priv->ucode_beacon_time,
238 ucode_switch_time,
239 beacon_interval);
240 }
241 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
242 cmd.switch_time);
243 ch_info = iwl_get_channel_info(priv, priv->band, ch);
244 if (ch_info)
245 cmd.expect_beacon = is_channel_radar(ch_info);
246 else {
247 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
248 ctx->active.channel, ch);
249 return -EFAULT;
250 }
251 priv->switch_rxon.channel = cmd.channel;
252 priv->switch_rxon.switch_in_progress = true;
253
254 return iwl_send_cmd_sync(priv, &hcmd);
255}
256
257static struct iwl_lib_ops iwl2000_lib = {
258 .set_hw_params = iwl2000_hw_set_hw_params,
259 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
260 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
261 .txq_set_sched = iwlagn_txq_set_sched,
262 .txq_agg_enable = iwlagn_txq_agg_enable,
263 .txq_agg_disable = iwlagn_txq_agg_disable,
264 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
265 .txq_free_tfd = iwl_hw_txq_free_tfd,
266 .txq_init = iwl_hw_tx_queue_init,
267 .rx_handler_setup = iwlagn_rx_handler_setup,
268 .setup_deferred_work = iwlagn_bt_setup_deferred_work,
269 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
270 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
271 .load_ucode = iwlagn_load_ucode,
272 .dump_nic_event_log = iwl_dump_nic_event_log,
273 .dump_nic_error_log = iwl_dump_nic_error_log,
274 .dump_csr = iwl_dump_csr,
275 .dump_fh = iwl_dump_fh,
276 .init_alive_start = iwlagn_init_alive_start,
277 .alive_notify = iwlagn_alive_notify,
278 .send_tx_power = iwlagn_send_tx_power,
279 .update_chain_flags = iwl_update_chain_flags,
280 .set_channel_switch = iwl2030_hw_channel_switch,
281 .apm_ops = {
282 .init = iwl_apm_init,
283 .config = iwl2000_nic_config,
284 },
285 .eeprom_ops = {
286 .regulatory_bands = {
287 EEPROM_REG_BAND_1_CHANNELS,
288 EEPROM_REG_BAND_2_CHANNELS,
289 EEPROM_REG_BAND_3_CHANNELS,
290 EEPROM_REG_BAND_4_CHANNELS,
291 EEPROM_REG_BAND_5_CHANNELS,
292 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
293 EEPROM_REG_BAND_52_HT40_CHANNELS
294 },
295 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
296 .release_semaphore = iwlcore_eeprom_release_semaphore,
297 .calib_version = iwlagn_eeprom_calib_version,
298 .query_addr = iwlagn_eeprom_query_addr,
299 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
300 },
301 .isr_ops = {
302 .isr = iwl_isr_ict,
303 .free = iwl_free_isr_ict,
304 .alloc = iwl_alloc_isr_ict,
305 .reset = iwl_reset_ict,
306 .disable = iwl_disable_ict,
307 },
308 .temp_ops = {
309 .temperature = iwlagn_temperature,
310 },
311 .debugfs_ops = {
312 .rx_stats_read = iwl_ucode_rx_stats_read,
313 .tx_stats_read = iwl_ucode_tx_stats_read,
314 .general_stats_read = iwl_ucode_general_stats_read,
315 .bt_stats_read = iwl_ucode_bt_stats_read,
316 .reply_tx_error = iwl_reply_tx_error_read,
317 },
318 .check_plcp_health = iwl_good_plcp_health,
319 .check_ack_health = iwl_good_ack_health,
320 .txfifo_flush = iwlagn_txfifo_flush,
321 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
322 .tt_ops = {
323 .lower_power_detection = iwl_tt_is_low_power_state,
324 .tt_power_mode = iwl_tt_current_power_mode,
325 .ct_kill_check = iwl_check_for_ct_kill,
326 }
327};
328
329static const struct iwl_ops iwl2000_ops = {
330 .lib = &iwl2000_lib,
331 .hcmd = &iwlagn_hcmd,
332 .utils = &iwlagn_hcmd_utils,
333 .led = &iwlagn_led_ops,
334 .ieee80211_ops = &iwlagn_hw_ops,
335};
336
337static const struct iwl_ops iwl2030_ops = {
338 .lib = &iwl2000_lib,
339 .hcmd = &iwlagn_bt_hcmd,
340 .utils = &iwlagn_hcmd_utils,
341 .led = &iwlagn_led_ops,
342 .ieee80211_ops = &iwlagn_hw_ops,
343};
344
345static const struct iwl_ops iwl200_ops = {
346 .lib = &iwl2000_lib,
347 .hcmd = &iwlagn_hcmd,
348 .utils = &iwlagn_hcmd_utils,
349 .led = &iwlagn_led_ops,
350 .ieee80211_ops = &iwlagn_hw_ops,
351};
352
353static const struct iwl_ops iwl230_ops = {
354 .lib = &iwl2000_lib,
355 .hcmd = &iwlagn_bt_hcmd,
356 .utils = &iwlagn_hcmd_utils,
357 .led = &iwlagn_led_ops,
358 .ieee80211_ops = &iwlagn_hw_ops,
359};
360
361static struct iwl_base_params iwl2000_base_params = {
362 .eeprom_size = OTP_LOW_IMAGE_SIZE,
363 .num_of_queues = IWLAGN_NUM_QUEUES,
364 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
365 .pll_cfg_val = 0,
366 .set_l0s = true,
367 .use_bsm = false,
368 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
369 .shadow_ram_support = true,
370 .led_compensation = 51,
371 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
372 .adv_thermal_throttle = true,
373 .support_ct_kill_exit = true,
374 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
375 .chain_noise_scale = 1000,
376 .wd_timeout = IWL_DEF_WD_TIMEOUT,
377 .max_event_log_size = 512,
378 .ucode_tracing = true,
379 .sensitivity_calib_by_driver = true,
380 .chain_noise_calib_by_driver = true,
381 .shadow_reg_enable = true,
382};
383
384
385static struct iwl_base_params iwl2030_base_params = {
386 .eeprom_size = OTP_LOW_IMAGE_SIZE,
387 .num_of_queues = IWLAGN_NUM_QUEUES,
388 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
389 .pll_cfg_val = 0,
390 .set_l0s = true,
391 .use_bsm = false,
392 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
393 .shadow_ram_support = true,
394 .led_compensation = 57,
395 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
396 .adv_thermal_throttle = true,
397 .support_ct_kill_exit = true,
398 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
399 .chain_noise_scale = 1000,
400 .wd_timeout = IWL_LONG_WD_TIMEOUT,
401 .max_event_log_size = 512,
402 .ucode_tracing = true,
403 .sensitivity_calib_by_driver = true,
404 .chain_noise_calib_by_driver = true,
405 .shadow_reg_enable = true,
406};
407
408static struct iwl_ht_params iwl2000_ht_params = {
409 .ht_greenfield_support = true,
410 .use_rts_for_aggregation = true, /* use rts/cts protection */
411};
412
413static struct iwl_bt_params iwl2030_bt_params = {
414 .bt_statistics = true,
415 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
416 .advanced_bt_coexist = true,
417 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
418 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
419 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
420 .bt_sco_disable = true,
421};
422
423#define IWL_DEVICE_2000 \
424 .fw_name_pre = IWL2000_FW_PRE, \
425 .ucode_api_max = IWL2000_UCODE_API_MAX, \
426 .ucode_api_min = IWL2000_UCODE_API_MIN, \
427 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
428 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
429 .ops = &iwl2000_ops, \
430 .mod_params = &iwlagn_mod_params, \
431 .base_params = &iwl2000_base_params, \
432 .need_dc_calib = true, \
433 .need_temp_offset_calib = true, \
434 .led_mode = IWL_LED_RF_STATE, \
435 .iq_invert = true \
436
437struct iwl_cfg iwl2000_2bgn_cfg = {
438 .name = "2000 Series 2x2 BGN",
439 IWL_DEVICE_2000,
440 .ht_params = &iwl2000_ht_params,
441};
442
443struct iwl_cfg iwl2000_2bg_cfg = {
444 .name = "2000 Series 2x2 BG",
445 IWL_DEVICE_2000,
446};
447
448#define IWL_DEVICE_2030 \
449 .fw_name_pre = IWL2030_FW_PRE, \
450 .ucode_api_max = IWL2030_UCODE_API_MAX, \
451 .ucode_api_min = IWL2030_UCODE_API_MIN, \
452 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
453 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
454 .ops = &iwl2030_ops, \
455 .mod_params = &iwlagn_mod_params, \
456 .base_params = &iwl2030_base_params, \
457 .bt_params = &iwl2030_bt_params, \
458 .need_dc_calib = true, \
459 .need_temp_offset_calib = true, \
460 .led_mode = IWL_LED_RF_STATE, \
461 .adv_pm = true, \
462 .iq_invert = true \
463
464struct iwl_cfg iwl2030_2bgn_cfg = {
465 .name = "2000 Series 2x2 BGN/BT",
466 IWL_DEVICE_2030,
467 .ht_params = &iwl2000_ht_params,
468};
469
470struct iwl_cfg iwl2030_2bg_cfg = {
471 .name = "2000 Series 2x2 BG/BT",
472 IWL_DEVICE_2030,
473};
474
475#define IWL_DEVICE_6035 \
476 .fw_name_pre = IWL2030_FW_PRE, \
477 .ucode_api_max = IWL2030_UCODE_API_MAX, \
478 .ucode_api_min = IWL2030_UCODE_API_MIN, \
479 .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \
480 .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION, \
481 .ops = &iwl2030_ops, \
482 .mod_params = &iwlagn_mod_params, \
483 .base_params = &iwl2030_base_params, \
484 .bt_params = &iwl2030_bt_params, \
485 .need_dc_calib = true, \
486 .need_temp_offset_calib = true, \
487 .led_mode = IWL_LED_RF_STATE, \
488 .adv_pm = true \
489
490struct iwl_cfg iwl6035_2agn_cfg = {
491 .name = "2000 Series 2x2 AGN/BT",
492 IWL_DEVICE_6035,
493 .ht_params = &iwl2000_ht_params,
494};
495
496struct iwl_cfg iwl6035_2abg_cfg = {
497 .name = "2000 Series 2x2 ABG/BT",
498 IWL_DEVICE_6035,
499};
500
501struct iwl_cfg iwl6035_2bg_cfg = {
502 .name = "2000 Series 2x2 BG/BT",
503 IWL_DEVICE_6035,
504};
505
506#define IWL_DEVICE_200 \
507 .fw_name_pre = IWL200_FW_PRE, \
508 .ucode_api_max = IWL200_UCODE_API_MAX, \
509 .ucode_api_min = IWL200_UCODE_API_MIN, \
510 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
511 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
512 .ops = &iwl200_ops, \
513 .mod_params = &iwlagn_mod_params, \
514 .base_params = &iwl2000_base_params, \
515 .need_dc_calib = true, \
516 .need_temp_offset_calib = true, \
517 .led_mode = IWL_LED_RF_STATE, \
518 .adv_pm = true, \
519 .rx_with_siso_diversity = true \
520
521struct iwl_cfg iwl200_bg_cfg = {
522 .name = "200 Series 1x1 BG",
523 IWL_DEVICE_200,
524};
525
526struct iwl_cfg iwl200_bgn_cfg = {
527 .name = "200 Series 1x1 BGN",
528 IWL_DEVICE_200,
529 .ht_params = &iwl2000_ht_params,
530};
531
532#define IWL_DEVICE_230 \
533 .fw_name_pre = IWL200_FW_PRE, \
534 .ucode_api_max = IWL200_UCODE_API_MAX, \
535 .ucode_api_min = IWL200_UCODE_API_MIN, \
536 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
537 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
538 .ops = &iwl230_ops, \
539 .mod_params = &iwlagn_mod_params, \
540 .base_params = &iwl2030_base_params, \
541 .bt_params = &iwl2030_bt_params, \
542 .need_dc_calib = true, \
543 .need_temp_offset_calib = true, \
544 .led_mode = IWL_LED_RF_STATE, \
545 .adv_pm = true, \
546 .rx_with_siso_diversity = true \
547
548struct iwl_cfg iwl230_bg_cfg = {
549 .name = "200 Series 1x1 BG/BT",
550 IWL_DEVICE_230,
551};
552
553struct iwl_cfg iwl230_bgn_cfg = {
554 .name = "200 Series 1x1 BGN/BT",
555 IWL_DEVICE_230,
556 .ht_params = &iwl2000_ht_params,
557};
558
559MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
560MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
561MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index abe2b739c4d..dc7c3a4167a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -59,33 +59,6 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
59 return iwl_send_cmd(priv, &cmd); 59 return iwl_send_cmd(priv, &cmd);
60} 60}
61 61
62/* Set led on command */
63static int iwl3945_led_on(struct iwl_priv *priv)
64{
65 struct iwl_led_cmd led_cmd = {
66 .id = IWL_LED_LINK,
67 .on = IWL_LED_SOLID,
68 .off = 0,
69 .interval = IWL_DEF_LED_INTRVL
70 };
71 return iwl3945_send_led_cmd(priv, &led_cmd);
72}
73
74/* Set led off command */
75static int iwl3945_led_off(struct iwl_priv *priv)
76{
77 struct iwl_led_cmd led_cmd = {
78 .id = IWL_LED_LINK,
79 .on = 0,
80 .off = 0,
81 .interval = IWL_DEF_LED_INTRVL
82 };
83 IWL_DEBUG_LED(priv, "led off\n");
84 return iwl3945_send_led_cmd(priv, &led_cmd);
85}
86
87const struct iwl_led_ops iwl3945_led_ops = { 62const struct iwl_led_ops iwl3945_led_ops = {
88 .cmd = iwl3945_send_led_cmd, 63 .cmd = iwl3945_send_led_cmd,
89 .on = iwl3945_led_on,
90 .off = iwl3945_led_off,
91}; 64};
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 39b6f16c87f..f4cd9370e7f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -528,10 +528,11 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
528 528
529 rx_status.flag = 0; 529 rx_status.flag = 0;
530 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 530 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
531 rx_status.freq =
532 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
533 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 531 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
534 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 532 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
533 rx_status.freq =
534 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
535 rx_status.band);
535 536
536 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); 537 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
537 if (rx_status.band == IEEE80211_BAND_5GHZ) 538 if (rx_status.band == IEEE80211_BAND_5GHZ)
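
Both ieee80211_channel_to_frequency() hunks in this patch reflect a mac80211 API change: the helper now takes the band as a second argument, since a bare channel number no longer identifies the frequency unambiguously. A simplified stand-in for the conversion, assuming the standard 2.4/5 GHz channel plans (this is not the mac80211 implementation itself):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Simplified channel -> centre frequency (MHz) conversion. */
static int channel_to_frequency(int chan, enum band band)
{
	if (band == BAND_2GHZ) {
		if (chan == 14)
			return 2484;	/* Japanese channel 14 is offset */
		return 2407 + chan * 5;
	}
	return 5000 + chan * 5;
}

int main(void)
{
	printf("2.4 GHz chan 6  -> %d MHz\n", channel_to_frequency(6, BAND_2GHZ));
	printf("2.4 GHz chan 14 -> %d MHz\n", channel_to_frequency(14, BAND_2GHZ));
	printf("5 GHz   chan 36 -> %d MHz\n", channel_to_frequency(36, BAND_5GHZ));
	return 0;
}
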
@@ -695,8 +696,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
695 696
696 /* We need to figure out how to get the sta->supp_rates while 697 /* We need to figure out how to get the sta->supp_rates while
697 * in this running context */ 698 * in this running context */
698 rate_mask = IWL_RATES_MASK; 699 rate_mask = IWL_RATES_MASK_3945;
699
700 700
701 /* Set retry limit on DATA packets and Probe Responses*/ 701 /* Set retry limit on DATA packets and Probe Responses*/
702 if (ieee80211_is_probe_resp(fc)) 702 if (ieee80211_is_probe_resp(fc))
@@ -898,13 +898,11 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
898{ 898{
899 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; 899 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
900 unsigned long flags; 900 unsigned long flags;
901 u8 rev_id = 0; 901 u8 rev_id = priv->pci_dev->revision;
902 902
903 spin_lock_irqsave(&priv->lock, flags); 903 spin_lock_irqsave(&priv->lock, flags);
904 904
905 /* Determine HW type */ 905 /* Determine HW type */
906 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
907
908 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 906 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
909 907
910 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 908 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
@@ -1583,7 +1581,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1583 ref_temp); 1581 ref_temp);
1584 1582
1585 /* set tx power value for all rates, OFDM and CCK */ 1583 /* set tx power value for all rates, OFDM and CCK */
1586 for (rate_index = 0; rate_index < IWL_RATE_COUNT; 1584 for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
1587 rate_index++) { 1585 rate_index++) {
1588 int power_idx = 1586 int power_idx =
1589 ch_info->power_info[rate_index].base_power_index; 1587 ch_info->power_info[rate_index].base_power_index;
@@ -1823,7 +1821,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1823 1821
1824 /* If we issue a new RXON command which required a tune then we must 1822 /* If we issue a new RXON command which required a tune then we must
1825 * send a new TXPOWER command or we won't be able to Tx any frames */ 1823 * send a new TXPOWER command or we won't be able to Tx any frames */
1826 rc = priv->cfg->ops->lib->send_tx_power(priv); 1824 rc = iwl_set_tx_power(priv, priv->tx_power_next, true);
1827 if (rc) { 1825 if (rc) {
1828 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); 1826 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1829 return rc; 1827 return rc;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 91a9f525346..8998ed134d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -251,14 +251,6 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
251*/ 251*/
252static void iwl4965_init_alive_start(struct iwl_priv *priv) 252static void iwl4965_init_alive_start(struct iwl_priv *priv)
253{ 253{
254 /* Check alive response for "valid" sign from uCode */
255 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
256 /* We had an error bringing up the hardware, so take it
257 * all the way back down so we can try again */
258 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
259 goto restart;
260 }
261
262 /* Bootstrap uCode has loaded initialize uCode ... verify inst image. 254 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
263 * This is a paranoid check, because we would not have gotten the 255 * This is a paranoid check, because we would not have gotten the
264 * "initialize" alive if code weren't properly loaded. */ 256 * "initialize" alive if code weren't properly loaded. */
@@ -1571,7 +1563,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1571 1563
1572 /* If we issue a new RXON command which required a tune then we must 1564 /* If we issue a new RXON command which required a tune then we must
1573 * send a new TXPOWER command or we won't be able to Tx any frames */ 1565 * send a new TXPOWER command or we won't be able to Tx any frames */
1574 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 1566 ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
1575 if (ret) { 1567 if (ret) {
1576 IWL_ERR(priv, "Error sending TX power (%d)\n", ret); 1568 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1577 return ret; 1569 return ret;
@@ -2274,6 +2266,29 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2274 spin_unlock_irqrestore(&priv->sta_lock, flags); 2266 spin_unlock_irqrestore(&priv->sta_lock, flags);
2275} 2267}
2276 2268
2269static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
2270 struct iwl_rx_mem_buffer *rxb)
2271{
2272 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2273 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
2274#ifdef CONFIG_IWLWIFI_DEBUG
2275 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
2276
2277 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
2278 "tsf:0x%.8x%.8x rate:%d\n",
2279 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
2280 beacon->beacon_notify_hdr.failure_frame,
2281 le32_to_cpu(beacon->ibss_mgr_status),
2282 le32_to_cpu(beacon->high_tsf),
2283 le32_to_cpu(beacon->low_tsf), rate);
2284#endif
2285
2286 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
2287
2288 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
2289 queue_work(priv->workqueue, &priv->beacon_update);
2290}
2291
2277static int iwl4965_calc_rssi(struct iwl_priv *priv, 2292static int iwl4965_calc_rssi(struct iwl_priv *priv,
2278 struct iwl_rx_phy_res *rx_resp) 2293 struct iwl_rx_phy_res *rx_resp)
2279{ 2294{
@@ -2316,6 +2331,12 @@ static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
2316 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx; 2331 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
2317 /* Tx response */ 2332 /* Tx response */
2318 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; 2333 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
2334 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
2335
2336 /* set up notification wait support */
2337 spin_lock_init(&priv->_agn.notif_wait_lock);
2338 INIT_LIST_HEAD(&priv->_agn.notif_waits);
2339 init_waitqueue_head(&priv->_agn.notif_waitq);
2319} 2340}
2320 2341
2321static void iwl4965_setup_deferred_work(struct iwl_priv *priv) 2342static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index ef36aff1bb4..f6493f77610 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -67,13 +67,13 @@
67#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode" 67#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
68#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api) 68#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
69 69
70#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-" 70#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
71#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode" 71#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
72#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api) 72#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
73 73
74#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-" 74#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
75#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode" 75#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
76#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api) 76#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
77 77
78static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 78static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
79{ 79{
@@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
90 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 90 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
91} 91}
92 92
93static void iwl6050g2_additional_nic_config(struct iwl_priv *priv) 93static void iwl6150_additional_nic_config(struct iwl_priv *priv)
94{ 94{
95 /* Indicate calibration version to uCode. */ 95 /* Indicate calibration version to uCode. */
96 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6) 96 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -354,7 +354,7 @@ static struct iwl_lib_ops iwl6000_lib = {
354 } 354 }
355}; 355};
356 356
357static struct iwl_lib_ops iwl6000g2b_lib = { 357static struct iwl_lib_ops iwl6030_lib = {
358 .set_hw_params = iwl6000_hw_set_hw_params, 358 .set_hw_params = iwl6000_hw_set_hw_params,
359 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, 359 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
360 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, 360 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -430,8 +430,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
430 .additional_nic_config = &iwl6050_additional_nic_config, 430 .additional_nic_config = &iwl6050_additional_nic_config,
431}; 431};
432 432
433static struct iwl_nic_ops iwl6050g2_nic_ops = { 433static struct iwl_nic_ops iwl6150_nic_ops = {
434 .additional_nic_config = &iwl6050g2_additional_nic_config, 434 .additional_nic_config = &iwl6150_additional_nic_config,
435}; 435};
436 436
437static const struct iwl_ops iwl6000_ops = { 437static const struct iwl_ops iwl6000_ops = {
@@ -451,17 +451,17 @@ static const struct iwl_ops iwl6050_ops = {
451 .ieee80211_ops = &iwlagn_hw_ops, 451 .ieee80211_ops = &iwlagn_hw_ops,
452}; 452};
453 453
454static const struct iwl_ops iwl6050g2_ops = { 454static const struct iwl_ops iwl6150_ops = {
455 .lib = &iwl6000_lib, 455 .lib = &iwl6000_lib,
456 .hcmd = &iwlagn_hcmd, 456 .hcmd = &iwlagn_hcmd,
457 .utils = &iwlagn_hcmd_utils, 457 .utils = &iwlagn_hcmd_utils,
458 .led = &iwlagn_led_ops, 458 .led = &iwlagn_led_ops,
459 .nic = &iwl6050g2_nic_ops, 459 .nic = &iwl6150_nic_ops,
460 .ieee80211_ops = &iwlagn_hw_ops, 460 .ieee80211_ops = &iwlagn_hw_ops,
461}; 461};
462 462
463static const struct iwl_ops iwl6000g2b_ops = { 463static const struct iwl_ops iwl6030_ops = {
464 .lib = &iwl6000g2b_lib, 464 .lib = &iwl6030_lib,
465 .hcmd = &iwlagn_bt_hcmd, 465 .hcmd = &iwlagn_bt_hcmd,
466 .utils = &iwlagn_hcmd_utils, 466 .utils = &iwlagn_hcmd_utils,
467 .led = &iwlagn_led_ops, 467 .led = &iwlagn_led_ops,
@@ -479,7 +479,6 @@ static struct iwl_base_params iwl6000_base_params = {
479 .shadow_ram_support = true, 479 .shadow_ram_support = true,
480 .led_compensation = 51, 480 .led_compensation = 51,
481 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 481 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
482 .supports_idle = true,
483 .adv_thermal_throttle = true, 482 .adv_thermal_throttle = true,
484 .support_ct_kill_exit = true, 483 .support_ct_kill_exit = true,
485 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 484 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -503,7 +502,6 @@ static struct iwl_base_params iwl6050_base_params = {
503 .shadow_ram_support = true, 502 .shadow_ram_support = true,
504 .led_compensation = 51, 503 .led_compensation = 51,
505 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 504 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
506 .supports_idle = true,
507 .adv_thermal_throttle = true, 505 .adv_thermal_throttle = true,
508 .support_ct_kill_exit = true, 506 .support_ct_kill_exit = true,
509 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 507 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -526,7 +524,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
526 .shadow_ram_support = true, 524 .shadow_ram_support = true,
527 .led_compensation = 57, 525 .led_compensation = 57,
528 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 526 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
529 .supports_idle = true,
530 .adv_thermal_throttle = true, 527 .adv_thermal_throttle = true,
531 .support_ct_kill_exit = true, 528 .support_ct_kill_exit = true,
532 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 529 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -555,11 +552,11 @@ static struct iwl_bt_params iwl6000_bt_params = {
555}; 552};
556 553
557#define IWL_DEVICE_6005 \ 554#define IWL_DEVICE_6005 \
558 .fw_name_pre = IWL6000G2A_FW_PRE, \ 555 .fw_name_pre = IWL6005_FW_PRE, \
559 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 556 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
560 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 557 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
561 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \ 558 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
562 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \ 559 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
563 .ops = &iwl6000_ops, \ 560 .ops = &iwl6000_ops, \
564 .mod_params = &iwlagn_mod_params, \ 561 .mod_params = &iwlagn_mod_params, \
565 .base_params = &iwl6000_g2_base_params, \ 562 .base_params = &iwl6000_g2_base_params, \
@@ -584,12 +581,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
584}; 581};
585 582
586#define IWL_DEVICE_6030 \ 583#define IWL_DEVICE_6030 \
587 .fw_name_pre = IWL6000G2B_FW_PRE, \ 584 .fw_name_pre = IWL6030_FW_PRE, \
588 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 585 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
589 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 586 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
590 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \ 587 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
591 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \ 588 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
592 .ops = &iwl6000g2b_ops, \ 589 .ops = &iwl6030_ops, \
593 .mod_params = &iwlagn_mod_params, \ 590 .mod_params = &iwlagn_mod_params, \
594 .base_params = &iwl6000_g2_base_params, \ 591 .base_params = &iwl6000_g2_base_params, \
595 .bt_params = &iwl6000_bt_params, \ 592 .bt_params = &iwl6000_bt_params, \
@@ -708,9 +705,9 @@ struct iwl_cfg iwl6150_bgn_cfg = {
708 .fw_name_pre = IWL6050_FW_PRE, 705 .fw_name_pre = IWL6050_FW_PRE,
709 .ucode_api_max = IWL6050_UCODE_API_MAX, 706 .ucode_api_max = IWL6050_UCODE_API_MAX,
710 .ucode_api_min = IWL6050_UCODE_API_MIN, 707 .ucode_api_min = IWL6050_UCODE_API_MIN,
711 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION, 708 .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
712 .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION, 709 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
713 .ops = &iwl6050g2_ops, 710 .ops = &iwl6150_ops,
714 .mod_params = &iwlagn_mod_params, 711 .mod_params = &iwlagn_mod_params,
715 .base_params = &iwl6050_base_params, 712 .base_params = &iwl6050_base_params,
716 .ht_params = &iwl6000_ht_params, 713 .ht_params = &iwl6000_ht_params,
@@ -736,5 +733,5 @@ struct iwl_cfg iwl6000_3agn_cfg = {
736 733
737MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 734MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
738MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 735MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
739MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 736MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
740MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 737MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index d16bb5ede01..9006293e740 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
631 } 631 }
632 632
633 spin_lock_irqsave(&priv->lock, flags); 633 spin_lock_irqsave(&priv->lock, flags);
634 if (priv->cfg->bt_params && 634 if (iwl_bt_statistics(priv)) {
635 priv->cfg->bt_params->bt_statistics) {
636 rx_info = &(((struct iwl_bt_notif_statistics *)resp)-> 635 rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
637 rx.general.common); 636 rx.general.common);
638 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm); 637 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -897,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
897 } 896 }
898 897
899 spin_lock_irqsave(&priv->lock, flags); 898 spin_lock_irqsave(&priv->lock, flags);
900 if (priv->cfg->bt_params && 899 if (iwl_bt_statistics(priv)) {
901 priv->cfg->bt_params->bt_statistics) {
902 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)-> 900 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
903 rx.general.common); 901 rx.general.common);
904 } else { 902 } else {
@@ -913,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
913 911
914 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); 912 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
915 rxon_chnum = le16_to_cpu(ctx->staging.channel); 913 rxon_chnum = le16_to_cpu(ctx->staging.channel);
916 if (priv->cfg->bt_params && 914 if (iwl_bt_statistics(priv)) {
917 priv->cfg->bt_params->bt_statistics) {
918 stat_band24 = !!(((struct iwl_bt_notif_statistics *) 915 stat_band24 = !!(((struct iwl_bt_notif_statistics *)
919 stat_resp)->flag & 916 stat_resp)->flag &
920 STATISTICS_REPLY_FLG_BAND_24G_MSK); 917 STATISTICS_REPLY_FLG_BAND_24G_MSK);
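
The repeated open-coded test priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics is folded into an iwl_bt_statistics() helper throughout this series. The helper's definition is not part of this hunk, so the version below is only a guess reconstructed from the call sites, with stand-in struct definitions so it compiles on its own:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types: the real struct iwl_priv / iwl_cfg live in iwl-dev.h. */
struct iwl_bt_params { bool bt_statistics; };
struct iwl_cfg { const struct iwl_bt_params *bt_params; };
struct iwl_priv { const struct iwl_cfg *cfg; };

/* Presumed shape of the helper that replaces the open-coded test. */
static inline bool iwl_bt_statistics(struct iwl_priv *priv)
{
	return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
}

int main(void)
{
	static const struct iwl_bt_params bt = { .bt_statistics = true };
	static const struct iwl_cfg with_bt = { .bt_params = &bt };
	static const struct iwl_cfg without_bt = { .bt_params = NULL };
	struct iwl_priv a = { .cfg = &with_bt }, b = { .cfg = &without_bt };

	printf("with bt_params:    %d\n", iwl_bt_statistics(&a));
	printf("without bt_params: %d\n", iwl_bt_statistics(&b));
	return 0;
}
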
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index a6dbd8983da..b500aaae53e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
39 int p = 0; 39 int p = 0;
40 u32 flag; 40 u32 flag;
41 41
42 if (priv->cfg->bt_params && 42 if (iwl_bt_statistics(priv))
43 priv->cfg->bt_params->bt_statistics)
44 flag = le32_to_cpu(priv->_agn.statistics_bt.flag); 43 flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
45 else 44 else
46 flag = le32_to_cpu(priv->_agn.statistics.flag); 45 flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
89 * the last statistics notification from uCode 88 * the last statistics notification from uCode
90 * might not reflect the current uCode activity 89 * might not reflect the current uCode activity
91 */ 90 */
92 if (priv->cfg->bt_params && 91 if (iwl_bt_statistics(priv)) {
93 priv->cfg->bt_params->bt_statistics) {
94 ofdm = &priv->_agn.statistics_bt.rx.ofdm; 92 ofdm = &priv->_agn.statistics_bt.rx.ofdm;
95 cck = &priv->_agn.statistics_bt.rx.cck; 93 cck = &priv->_agn.statistics_bt.rx.cck;
96 general = &priv->_agn.statistics_bt.rx.general.common; 94 general = &priv->_agn.statistics_bt.rx.general.common;
@@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
536 * the last statistics notification from uCode 534 * the last statistics notification from uCode
537 * might not reflect the current uCode activity 535 * might not reflect the current uCode activity
538 */ 536 */
539 if (priv->cfg->bt_params && 537 if (iwl_bt_statistics(priv)) {
540 priv->cfg->bt_params->bt_statistics) {
541 tx = &priv->_agn.statistics_bt.tx; 538 tx = &priv->_agn.statistics_bt.tx;
542 accum_tx = &priv->_agn.accum_statistics_bt.tx; 539 accum_tx = &priv->_agn.accum_statistics_bt.tx;
543 delta_tx = &priv->_agn.delta_statistics_bt.tx; 540 delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
737 * the last statistics notification from uCode 734 * the last statistics notification from uCode
738 * might not reflect the current uCode activity 735 * might not reflect the current uCode activity
739 */ 736 */
740 if (priv->cfg->bt_params && 737 if (iwl_bt_statistics(priv)) {
741 priv->cfg->bt_params->bt_statistics) {
742 general = &priv->_agn.statistics_bt.general.common; 738 general = &priv->_agn.statistics_bt.general.common;
743 dbg = &priv->_agn.statistics_bt.general.common.dbg; 739 dbg = &priv->_agn.statistics_bt.general.common.dbg;
744 div = &priv->_agn.statistics_bt.general.common.div; 740 div = &priv->_agn.statistics_bt.general.common.div;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 366340f3fb0..41543ad4cb8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -305,7 +305,11 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
305 cmd.slots[0].type = 0; /* BSS */ 305 cmd.slots[0].type = 0; /* BSS */
306 cmd.slots[1].type = 1; /* PAN */ 306 cmd.slots[1].type = 1; /* PAN */
307 307
308 if (ctx_bss->vif && ctx_pan->vif) { 308 if (priv->_agn.hw_roc_channel) {
309 /* both contexts must be used for this to happen */
310 slot1 = priv->_agn.hw_roc_duration;
311 slot0 = IWL_MIN_SLOT_TIME;
312 } else if (ctx_bss->vif && ctx_pan->vif) {
309 int bcnint = ctx_pan->vif->bss_conf.beacon_int; 313 int bcnint = ctx_pan->vif->bss_conf.beacon_int;
310 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; 314 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
311 315
@@ -330,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
330 if (test_bit(STATUS_SCAN_HW, &priv->status) || 334 if (test_bit(STATUS_SCAN_HW, &priv->status) ||
331 (!ctx_bss->vif->bss_conf.idle && 335 (!ctx_bss->vif->bss_conf.idle &&
332 !ctx_bss->vif->bss_conf.assoc)) { 336 !ctx_bss->vif->bss_conf.assoc)) {
333 slot0 = dtim * bcnint * 3 - 20; 337 slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
334 slot1 = 20; 338 slot1 = IWL_MIN_SLOT_TIME;
335 } else if (!ctx_pan->vif->bss_conf.idle && 339 } else if (!ctx_pan->vif->bss_conf.idle &&
336 !ctx_pan->vif->bss_conf.assoc) { 340 !ctx_pan->vif->bss_conf.assoc) {
337 slot1 = bcnint * 3 - 20; 341 slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
338 slot0 = 20; 342 slot0 = IWL_MIN_SLOT_TIME;
339 } 343 }
340 } else if (ctx_pan->vif) { 344 } else if (ctx_pan->vif) {
341 slot0 = 0; 345 slot0 = 0;
@@ -344,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
344 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); 348 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
345 349
346 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 350 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
347 slot0 = slot1 * 3 - 20; 351 slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
348 slot1 = 20; 352 slot1 = IWL_MIN_SLOT_TIME;
349 } 353 }
350 } 354 }
351 355
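
The slot arithmetic above splits each three-beacon-interval PAN period between the BSS and PAN contexts, giving the context that is still scanning or associating the bulk of the period and leaving the other one only IWL_MIN_SLOT_TIME, which the replaced literals suggest is 20 TU. A worked example with typical numbers under that assumption:

#include <stdio.h>

#define IWL_MIN_SLOT_TIME 20	/* TU; the literal this series replaces */

int main(void)
{
	int bcnint = 100;	/* beacon interval in TU */
	int dtim = 1;		/* DTIM period           */

	/* BSS context busy (scanning / not yet associated): give the
	 * PAN context only the minimum slot. */
	int slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;	/* BSS */
	int slot1 = IWL_MIN_SLOT_TIME;				/* PAN */

	printf("BSS slot = %d TU, PAN slot = %d TU\n", slot0, slot1);
	return 0;
}
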
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 1a24946bc20..c1190d96561 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
63} 63}
64 64
65/* Set led register off */ 65/* Set led register off */
66static int iwl_led_on_reg(struct iwl_priv *priv) 66void iwlagn_led_enable(struct iwl_priv *priv)
67{ 67{
68 IWL_DEBUG_LED(priv, "led on\n");
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); 68 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70 return 0;
71}
72
73/* Set led register off */
74static int iwl_led_off_reg(struct iwl_priv *priv)
75{
76 IWL_DEBUG_LED(priv, "LED Reg off\n");
77 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
78 return 0;
79} 69}
80 70
81const struct iwl_led_ops iwlagn_led_ops = { 71const struct iwl_led_ops iwlagn_led_ops = {
82 .cmd = iwl_send_led_cmd, 72 .cmd = iwl_send_led_cmd,
83 .on = iwl_led_on_reg,
84 .off = iwl_led_off_reg,
85}; 73};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index a594e4fdc6b..96f323dc5dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -28,5 +28,6 @@
28#define __iwl_agn_led_h__ 28#define __iwl_agn_led_h__
29 29
30extern const struct iwl_led_ops iwlagn_led_ops; 30extern const struct iwl_led_ops iwlagn_led_ops;
31void iwlagn_led_enable(struct iwl_priv *priv);
31 32
32#endif /* __iwl_agn_led_h__ */ 33#endif /* __iwl_agn_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 3dee87e8f55..325ff5c89ee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -473,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
473 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = 473 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
474 iwlagn_rx_calib_complete; 474 iwlagn_rx_calib_complete;
475 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 475 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
476
477 /* set up notification wait support */
478 spin_lock_init(&priv->_agn.notif_wait_lock);
479 INIT_LIST_HEAD(&priv->_agn.notif_waits);
480 init_waitqueue_head(&priv->_agn.notif_waitq);
476} 481}
477 482
478void iwlagn_setup_deferred_work(struct iwl_priv *priv) 483void iwlagn_setup_deferred_work(struct iwl_priv *priv)
@@ -1157,10 +1162,11 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
1157 1162
1158 /* rx_status carries information about the packet to mac80211 */ 1163 /* rx_status carries information about the packet to mac80211 */
1159 rx_status.mactime = le64_to_cpu(phy_res->timestamp); 1164 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1160 rx_status.freq =
1161 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
1162 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 1165 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1163 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 1166 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1167 rx_status.freq =
1168 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
1169 rx_status.band);
1164 rx_status.rate_idx = 1170 rx_status.rate_idx =
1165 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); 1171 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1166 rx_status.flag = 0; 1172 rx_status.flag = 0;
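
The reordering above exists because ieee80211_channel_to_frequency() now takes the band: a bare channel number is ambiguous (numbers alias between 2.4 and 5 GHz), so the band must be known first. A standalone sketch of the mapping using the standard channelization, not the mac80211 helper itself:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

static int chan_to_freq(int chan, enum band band)
{
	if (band == BAND_2GHZ) {
		if (chan == 14)
			return 2484;        /* the one irregular 2.4 GHz channel */
		return 2407 + chan * 5;     /* channels 1..13 */
	}
	return 5000 + chan * 5;             /* 5 GHz channels */
}

int main(void)
{
	/* channel 1 is 2412 MHz in 2.4 GHz; channel 36 is 5180 MHz in 5 GHz */
	printf("%d %d\n", chan_to_freq(1, BAND_2GHZ), chan_to_freq(36, BAND_5GHZ));
	return 0;
}
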
@@ -1389,15 +1395,12 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1389 u32 extra; 1395 u32 extra;
1390 u32 suspend_time = 100; 1396 u32 suspend_time = 100;
1391 u32 scan_suspend_time = 100; 1397 u32 scan_suspend_time = 100;
1392 unsigned long flags;
1393 1398
1394 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 1399 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
1395 spin_lock_irqsave(&priv->lock, flags);
1396 if (priv->is_internal_short_scan) 1400 if (priv->is_internal_short_scan)
1397 interval = 0; 1401 interval = 0;
1398 else 1402 else
1399 interval = vif->bss_conf.beacon_int; 1403 interval = vif->bss_conf.beacon_int;
1400 spin_unlock_irqrestore(&priv->lock, flags);
1401 1404
1402 scan->suspend_time = 0; 1405 scan->suspend_time = 0;
1403 scan->max_out_time = cpu_to_le32(200 * 1024); 1406 scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1829,7 +1832,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1829 * IBSS mode (no proper uCode support for coex then). 1832 * IBSS mode (no proper uCode support for coex then).
1830 */ 1833 */
1831 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) { 1834 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1832 bt_cmd.flags = 0; 1835 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
1833 } else { 1836 } else {
1834 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << 1837 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1835 IWLAGN_BT_FLAG_COEX_MODE_SHIFT; 1838 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
@@ -1857,21 +1860,6 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1857 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd)) 1860 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
1858 IWL_ERR(priv, "failed to send BT Coex Config\n"); 1861 IWL_ERR(priv, "failed to send BT Coex Config\n");
1859 1862
1860 /*
1861 * When we are doing a restart, need to also reconfigure BT
1862 * SCO to the device. If not doing a restart, bt_sco_active
1863 * will always be false, so there's no need to have an extra
1864 * variable to check for it.
1865 */
1866 if (priv->bt_sco_active) {
1867 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
1868
1869 if (priv->bt_sco_active)
1870 sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
1871 if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
1872 sizeof(sco_cmd), &sco_cmd))
1873 IWL_ERR(priv, "failed to send BT SCO command\n");
1874 }
1875} 1863}
1876 1864
1877static void iwlagn_bt_traffic_change_work(struct work_struct *work) 1865static void iwlagn_bt_traffic_change_work(struct work_struct *work)
@@ -1881,6 +1869,11 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1881 struct iwl_rxon_context *ctx; 1869 struct iwl_rxon_context *ctx;
1882 int smps_request = -1; 1870 int smps_request = -1;
1883 1871
1872 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1873 /* bt coex disabled */
1874 return;
1875 }
1876
1884 /* 1877 /*
1885 * Note: bt_traffic_load can be overridden by scan complete and 1878 * Note: bt_traffic_load can be overridden by scan complete and
1886 * coex profile notifications. Ignore that since only bad consequence 1879 * coex profile notifications. Ignore that since only bad consequence
@@ -2032,9 +2025,13 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2032 unsigned long flags; 2025 unsigned long flags;
2033 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2026 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2034 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif; 2027 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
2035 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
2036 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg; 2028 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
2037 2029
2030 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
2031 /* bt coex disabled */
2032 return;
2033 }
2034
2038 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n"); 2035 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
2039 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status); 2036 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
2040 IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load); 2037 IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
@@ -2063,15 +2060,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2063 queue_work(priv->workqueue, 2060 queue_work(priv->workqueue,
2064 &priv->bt_traffic_change_work); 2061 &priv->bt_traffic_change_work);
2065 } 2062 }
2066 if (priv->bt_sco_active !=
2067 (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
2068 priv->bt_sco_active = uart_msg->frame3 &
2069 BT_UART_MSG_FRAME3SCOESCO_MSK;
2070 if (priv->bt_sco_active)
2071 sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
2072 iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
2073 sizeof(sco_cmd), &sco_cmd, NULL);
2074 }
2075 } 2063 }
2076 2064
2077 iwlagn_set_kill_msk(priv, uart_msg); 2065 iwlagn_set_kill_msk(priv, uart_msg);
@@ -2389,3 +2377,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2389 } 2377 }
2390 return 0; 2378 return 0;
2391} 2379}
2380
2381/* notification wait support */
2382void iwlagn_init_notification_wait(struct iwl_priv *priv,
2383 struct iwl_notification_wait *wait_entry,
2384 void (*fn)(struct iwl_priv *priv,
2385 struct iwl_rx_packet *pkt),
2386 u8 cmd)
2387{
2388 wait_entry->fn = fn;
2389 wait_entry->cmd = cmd;
2390 wait_entry->triggered = false;
2391
2392 spin_lock_bh(&priv->_agn.notif_wait_lock);
2393 list_add(&wait_entry->list, &priv->_agn.notif_waits);
2394 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2395}
2396
2397signed long iwlagn_wait_notification(struct iwl_priv *priv,
2398 struct iwl_notification_wait *wait_entry,
2399 unsigned long timeout)
2400{
2401 int ret;
2402
2403 ret = wait_event_timeout(priv->_agn.notif_waitq,
2404 &wait_entry->triggered,
2405 timeout);
2406
2407 spin_lock_bh(&priv->_agn.notif_wait_lock);
2408 list_del(&wait_entry->list);
2409 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2410
2411 return ret;
2412}
2413
2414void iwlagn_remove_notification(struct iwl_priv *priv,
2415 struct iwl_notification_wait *wait_entry)
2416{
2417 spin_lock_bh(&priv->_agn.notif_wait_lock);
2418 list_del(&wait_entry->list);
2419 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2420}
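
The three functions added above form a small notification-wait facility: a caller registers interest in a command ID, the RX path marks matching entries triggered and wakes the wait queue, and the caller either sees the notification or times out and unlinks itself. Here is a userspace sketch of the same pattern with POSIX threads in place of the kernel waitqueue and spinlock; all names and the 0xbd command ID are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct notif_wait {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool triggered;
	unsigned char cmd;
};

static void notif_init(struct notif_wait *w, unsigned char cmd)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->cond, NULL);
	w->triggered = false;
	w->cmd = cmd;
}

/* RX side: called when a packet with 'cmd' arrives */
static void notif_trigger(struct notif_wait *w, unsigned char cmd)
{
	pthread_mutex_lock(&w->lock);
	if (w->cmd == cmd) {
		w->triggered = true;
		pthread_cond_broadcast(&w->cond);
	}
	pthread_mutex_unlock(&w->lock);
}

/* Waiter side: returns true if the notification arrived in time */
static bool notif_wait_timeout(struct notif_wait *w, int secs)
{
	struct timespec ts;
	bool ok;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&w->lock);
	while (!w->triggered) {
		if (pthread_cond_timedwait(&w->cond, &w->lock, &ts))
			break; /* timed out */
	}
	ok = w->triggered;
	pthread_mutex_unlock(&w->lock);
	return ok;
}

int main(void)
{
	struct notif_wait w;

	notif_init(&w, 0xbd);      /* e.g. a "deactivation complete" command */
	notif_trigger(&w, 0xbd);   /* pretend the firmware answered */
	printf("%s\n", notif_wait_timeout(&w, 1) ? "notified" : "timeout");
	return 0;
}
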
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 75fcd30a7c1..d03b4734c89 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
179}; 179};
180 180
181static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { 181static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
182 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ 182 {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
183 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ 183 {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
184 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ 184 {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
185 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ 185 {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
186}; 186};
187 187
188static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { 188static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
189 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ 189 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
190 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ 190 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
191 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ 191 {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
192 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ 192 {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
193}; 193};
194 194
195static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { 195static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
196 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ 196 {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
197 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ 197 {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
198 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ 198 {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
199 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ 199 {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
200}; 200};
201 201
202static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { 202static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
203 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ 203 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
204 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ 204 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
205 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ 205 {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
206 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ 206 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
207}; 207};
208 208
209static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { 209static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
@@ -2890,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2890 u8 ant_toggle_cnt = 0; 2890 u8 ant_toggle_cnt = 0;
2891 u8 use_ht_possible = 1; 2891 u8 use_ht_possible = 1;
2892 u8 valid_tx_ant = 0; 2892 u8 valid_tx_ant = 0;
2893 struct iwl_station_priv *sta_priv =
2894 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2893 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; 2895 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
2894 2896
2895 /* Override starting rate (index 0) if needed for debug purposes */ 2897 /* Override starting rate (index 0) if needed for debug purposes */
@@ -3008,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3008 repeat_rate--; 3010 repeat_rate--;
3009 } 3011 }
3010 3012
3011 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 3013 lq_cmd->agg_params.agg_frame_cnt_limit =
3014 sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3012 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 3015 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
3013 3016
3014 lq_cmd->agg_params.agg_time_limit = 3017 lq_cmd->agg_params.agg_time_limit =
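
The agg_frame_cnt_limit change above relies on the GNU ?: extension: a ?: b yields a when a is non-zero and b otherwise, so a zero (not yet negotiated) max_agg_bufsize falls back to the default. A hedged sketch, with 63 standing in for LINK_QUAL_AGG_FRAME_LIMIT_DEF; build with gcc or clang, which accept the extension.

#include <stdio.h>

int main(void)
{
	unsigned char max_agg_bufsize = 0;   /* not negotiated yet */
	unsigned char frame_limit_def = 63;  /* stand-in for LINK_QUAL_AGG_FRAME_LIMIT_DEF */

	unsigned char limit = max_agg_bufsize ?: frame_limit_def;
	printf("%u\n", limit);               /* 63: falls back to the default */

	max_agg_bufsize = 32;                /* peer advertised a smaller reorder buffer */
	limit = max_agg_bufsize ?: frame_limit_def;
	printf("%u\n", limit);               /* 32 */
	return 0;
}
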
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 75e50d33ecb..184828c72b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -213,6 +213,7 @@ enum {
213 IWL_CCK_BASIC_RATES_MASK) 213 IWL_CCK_BASIC_RATES_MASK)
214 214
215#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) 215#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
216#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
216 217
217#define IWL_INVALID_VALUE -1 218#define IWL_INVALID_VALUE -1
218 219
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index bbd40b7dd59..b192ca842f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -73,8 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c; 73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise; 74 int last_rx_noise;
75 75
76 if (priv->cfg->bt_params && 76 if (iwl_bt_statistics(priv))
77 priv->cfg->bt_params->bt_statistics)
78 rx_info = &(priv->_agn.statistics_bt.rx.general.common); 77 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
79 else 78 else
80 rx_info = &(priv->_agn.statistics.rx.general); 79 rx_info = &(priv->_agn.statistics.rx.general);
@@ -125,8 +124,7 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
125 struct statistics_general_common *general, *accum_general; 124 struct statistics_general_common *general, *accum_general;
126 struct statistics_tx *tx, *accum_tx; 125 struct statistics_tx *tx, *accum_tx;
127 126
128 if (priv->cfg->bt_params && 127 if (iwl_bt_statistics(priv)) {
129 priv->cfg->bt_params->bt_statistics) {
130 prev_stats = (__le32 *)&priv->_agn.statistics_bt; 128 prev_stats = (__le32 *)&priv->_agn.statistics_bt;
131 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt; 129 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
132 size = sizeof(struct iwl_bt_notif_statistics); 130 size = sizeof(struct iwl_bt_notif_statistics);
@@ -207,8 +205,7 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
207 struct statistics_rx_phy *ofdm; 205 struct statistics_rx_phy *ofdm;
208 struct statistics_rx_ht_phy *ofdm_ht; 206 struct statistics_rx_ht_phy *ofdm_ht;
209 207
210 if (priv->cfg->bt_params && 208 if (iwl_bt_statistics(priv)) {
211 priv->cfg->bt_params->bt_statistics) {
212 ofdm = &pkt->u.stats_bt.rx.ofdm; 209 ofdm = &pkt->u.stats_bt.rx.ofdm;
213 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht; 210 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
214 combined_plcp_delta = 211 combined_plcp_delta =
@@ -265,8 +262,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
265 int change; 262 int change;
266 struct iwl_rx_packet *pkt = rxb_addr(rxb); 263 struct iwl_rx_packet *pkt = rxb_addr(rxb);
267 264
268 if (priv->cfg->bt_params && 265 if (iwl_bt_statistics(priv)) {
269 priv->cfg->bt_params->bt_statistics) {
270 IWL_DEBUG_RX(priv, 266 IWL_DEBUG_RX(priv,
271 "Statistics notification received (%d vs %d).\n", 267 "Statistics notification received (%d vs %d).\n",
272 (int)sizeof(struct iwl_bt_notif_statistics), 268 (int)sizeof(struct iwl_bt_notif_statistics),
@@ -304,8 +300,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
304 300
305 iwl_recover_from_statistics(priv, pkt); 301 iwl_recover_from_statistics(priv, pkt);
306 302
307 if (priv->cfg->bt_params && 303 if (iwl_bt_statistics(priv))
308 priv->cfg->bt_params->bt_statistics)
309 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt, 304 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
310 sizeof(priv->_agn.statistics_bt)); 305 sizeof(priv->_agn.statistics_bt));
311 else 306 else
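
Every hunk in this file replaces the open-coded two-level check with an iwl_bt_statistics() predicate. Below is a self-contained sketch of what such a helper looks like; the struct layout is invented purely for illustration, only the shape of the NULL-then-flag test matters.

#include <stdbool.h>
#include <stdio.h>

struct bt_params { bool bt_statistics; };
struct cfg { const struct bt_params *bt_params; };
struct priv { const struct cfg *cfg; };

static inline bool iwl_bt_statistics(const struct priv *priv)
{
	/* both the optional bt_params block and its flag must be present */
	return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
}

int main(void)
{
	const struct bt_params bt = { .bt_statistics = true };
	const struct cfg with_bt = { .bt_params = &bt };
	const struct cfg without_bt = { .bt_params = NULL };
	struct priv a = { .cfg = &with_bt }, b = { .cfg = &without_bt };

	printf("%d %d\n", iwl_bt_statistics(&a), iwl_bt_statistics(&b)); /* 1 0 */
	return 0;
}
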
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 6d140bd5329..6c2adc58d65 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -52,10 +52,14 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
52 struct iwl_rxon_context *ctx, 52 struct iwl_rxon_context *ctx,
53 struct iwl_rxon_cmd *send) 53 struct iwl_rxon_cmd *send)
54{ 54{
55 struct iwl_notification_wait disable_wait;
55 __le32 old_filter = send->filter_flags; 56 __le32 old_filter = send->filter_flags;
56 u8 old_dev_type = send->dev_type; 57 u8 old_dev_type = send->dev_type;
57 int ret; 58 int ret;
58 59
60 iwlagn_init_notification_wait(priv, &disable_wait, NULL,
61 REPLY_WIPAN_DEACTIVATION_COMPLETE);
62
59 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 63 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
60 send->dev_type = RXON_DEV_TYPE_P2P; 64 send->dev_type = RXON_DEV_TYPE_P2P;
61 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send); 65 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
@@ -63,11 +67,18 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
63 send->filter_flags = old_filter; 67 send->filter_flags = old_filter;
64 send->dev_type = old_dev_type; 68 send->dev_type = old_dev_type;
65 69
66 if (ret) 70 if (ret) {
67 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); 71 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
68 72 iwlagn_remove_notification(priv, &disable_wait);
69 /* FIXME: WAIT FOR PAN DISABLE */ 73 } else {
70 msleep(300); 74 signed long wait_res;
75
76 wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
77 if (wait_res == 0) {
78 IWL_ERR(priv, "Timed out waiting for PAN disable\n");
79 ret = -EIO;
80 }
81 }
71 82
72 return ret; 83 return ret;
73} 84}
@@ -145,6 +156,23 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
145 /* always get timestamp with Rx frame */ 156 /* always get timestamp with Rx frame */
146 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 157 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
147 158
159 if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
160 struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
161
162 iwl_set_rxon_channel(priv, chan, ctx);
163 iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
164 ctx->staging.filter_flags |=
165 RXON_FILTER_ASSOC_MSK |
166 RXON_FILTER_PROMISC_MSK |
167 RXON_FILTER_CTL2HOST_MSK;
168 ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
169 new_assoc = true;
170
171 if (memcmp(&ctx->staging, &ctx->active,
172 sizeof(ctx->staging)) == 0)
173 return 0;
174 }
175
148 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 176 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
149 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) 177 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
150 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 178 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -288,10 +316,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
288 * If we issue a new RXON command which required a tune then we must 316 * If we issue a new RXON command which required a tune then we must
289 * send a new TXPOWER command or we won't be able to Tx any frames. 317 * send a new TXPOWER command or we won't be able to Tx any frames.
290 * 318 *
291 * FIXME: which RXON requires a tune? Can we optimise this out in 319 * It's expected we set power here if channel is changing.
292 * some cases?
293 */ 320 */
294 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 321 ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
295 if (ret) { 322 if (ret) {
296 IWL_ERR(priv, "Error sending TX power (%d)\n", ret); 323 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
297 return ret; 324 return ret;
@@ -546,12 +573,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
546 573
547 if (changes & BSS_CHANGED_ASSOC) { 574 if (changes & BSS_CHANGED_ASSOC) {
548 if (bss_conf->assoc) { 575 if (bss_conf->assoc) {
549 iwl_led_associate(priv);
550 priv->timestamp = bss_conf->timestamp; 576 priv->timestamp = bss_conf->timestamp;
551 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 577 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
552 } else { 578 } else {
553 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 579 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
554 iwl_led_disassociate(priv);
555 } 580 }
556 } 581 }
557 582
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 24a11b8f73b..266490d8a39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -539,7 +539,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
539 unsigned long flags; 539 unsigned long flags;
540 bool is_agg = false; 540 bool is_agg = false;
541 541
542 if (info->control.vif) 542 /*
543 * If the frame needs to go out off-channel, then
544 * we'll have put the PAN context to that channel,
545 * so make the frame go out there.
546 */
547 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
548 ctx = &priv->contexts[IWL_RXON_CTX_PAN];
549 else if (info->control.vif)
543 ctx = iwl_rxon_ctx_from_vif(info->control.vif); 550 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
544 551
545 spin_lock_irqsave(&priv->lock, flags); 552 spin_lock_irqsave(&priv->lock, flags);
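
The TX-path change above routes off-channel frames to the PAN context, since that context has been tuned to the remain-on-channel frequency. A toy sketch of the selection rule; the enum and flag values are made up.

#include <stdio.h>

enum ctx_id { CTX_BSS, CTX_PAN };
#define TX_CTL_OFFCHAN (1u << 0)

static enum ctx_id pick_ctx(unsigned int tx_flags, enum ctx_id vif_ctx)
{
	if (tx_flags & TX_CTL_OFFCHAN)
		return CTX_PAN;   /* PAN context was tuned to the ROC channel */
	return vif_ctx;           /* normal case: the interface's own context */
}

int main(void)
{
	printf("%d %d\n", pick_ctx(TX_CTL_OFFCHAN, CTX_BSS), pick_ctx(0, CTX_BSS));
	return 0; /* 1 0 */
}
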
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 24dabcd2a36..d807e5e2b71 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -308,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
308{ 308{
309 int ret = 0; 309 int ret = 0;
310 310
311 /* Check alive response for "valid" sign from uCode */
312 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
313 /* We had an error bringing up the hardware, so take it
314 * all the way back down so we can try again */
315 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
316 goto restart;
317 }
318
319 /* initialize uCode was loaded... verify inst image. 311 /* initialize uCode was loaded... verify inst image.
320 * This is a paranoid check, because we would not have gotten the 312 * This is a paranoid check, because we would not have gotten the
321 * "initialize" alive if code weren't properly loaded. */ 313 * "initialize" alive if code weren't properly loaded. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c1cfd9952e5..8025c62d4d0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -59,6 +59,7 @@
59#include "iwl-sta.h" 59#include "iwl-sta.h"
60#include "iwl-agn-calib.h" 60#include "iwl-agn-calib.h"
61#include "iwl-agn.h" 61#include "iwl-agn.h"
62#include "iwl-agn-led.h"
62 63
63 64
64/****************************************************************************** 65/******************************************************************************
@@ -461,8 +462,12 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
461 if (palive->is_valid == UCODE_VALID_OK) 462 if (palive->is_valid == UCODE_VALID_OK)
462 queue_delayed_work(priv->workqueue, pwork, 463 queue_delayed_work(priv->workqueue, pwork,
463 msecs_to_jiffies(5)); 464 msecs_to_jiffies(5));
464 else 465 else {
465 IWL_WARN(priv, "uCode did not respond OK.\n"); 466 IWL_WARN(priv, "%s uCode did not respond OK.\n",
467 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
468 "init" : "runtime");
469 queue_work(priv->workqueue, &priv->restart);
470 }
466} 471}
467 472
468static void iwl_bg_beacon_update(struct work_struct *work) 473static void iwl_bg_beacon_update(struct work_struct *work)
@@ -699,18 +704,18 @@ static void iwl_bg_ucode_trace(unsigned long data)
699 } 704 }
700} 705}
701 706
702static void iwl_rx_beacon_notif(struct iwl_priv *priv, 707static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
703 struct iwl_rx_mem_buffer *rxb) 708 struct iwl_rx_mem_buffer *rxb)
704{ 709{
705 struct iwl_rx_packet *pkt = rxb_addr(rxb); 710 struct iwl_rx_packet *pkt = rxb_addr(rxb);
706 struct iwl4965_beacon_notif *beacon = 711 struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
707 (struct iwl4965_beacon_notif *)pkt->u.raw;
708#ifdef CONFIG_IWLWIFI_DEBUG 712#ifdef CONFIG_IWLWIFI_DEBUG
713 u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
709 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 714 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
710 715
711 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 716 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
712 "tsf %d %d rate %d\n", 717 "tsf:0x%.8x%.8x rate:%d\n",
713 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, 718 status & TX_STATUS_MSK,
714 beacon->beacon_notify_hdr.failure_frame, 719 beacon->beacon_notify_hdr.failure_frame,
715 le32_to_cpu(beacon->ibss_mgr_status), 720 le32_to_cpu(beacon->ibss_mgr_status),
716 le32_to_cpu(beacon->high_tsf), 721 le32_to_cpu(beacon->high_tsf),
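
The new debug print above reports the beacon TSF as two 32-bit halves ("tsf:0x%.8x%.8x"). A short sketch of how such halves combine into the 64-bit microsecond timestamp; the values used are arbitrary.

#include <stdint.h>
#include <stdio.h>

static uint64_t tsf_from_halves(uint32_t high_tsf, uint32_t low_tsf)
{
	return ((uint64_t)high_tsf << 32) | low_tsf;
}

int main(void)
{
	unsigned int high = 0x00000001, low = 0x0000beef;

	printf("tsf:0x%.8x%.8x = %llu usec\n", high, low,
	       (unsigned long long)tsf_from_halves(high, low));
	return 0;
}
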
@@ -813,7 +818,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
813 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 818 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
814 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 819 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
815 iwl_rx_pm_debug_statistics_notif; 820 iwl_rx_pm_debug_statistics_notif;
816 priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; 821 priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
817 822
818 /* 823 /*
819 * The same handler is used for both the REPLY to a discrete 824 * The same handler is used for both the REPLY to a discrete
@@ -846,7 +851,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
846 * the appropriate handlers, including command responses, 851 * the appropriate handlers, including command responses,
847 * frame-received notifications, and other notifications. 852 * frame-received notifications, and other notifications.
848 */ 853 */
849void iwl_rx_handle(struct iwl_priv *priv) 854static void iwl_rx_handle(struct iwl_priv *priv)
850{ 855{
851 struct iwl_rx_mem_buffer *rxb; 856 struct iwl_rx_mem_buffer *rxb;
852 struct iwl_rx_packet *pkt; 857 struct iwl_rx_packet *pkt;
@@ -910,6 +915,27 @@ void iwl_rx_handle(struct iwl_priv *priv)
910 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && 915 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
911 (pkt->hdr.cmd != REPLY_TX); 916 (pkt->hdr.cmd != REPLY_TX);
912 917
918 /*
919 * Do the notification wait before RX handlers so
920 * even if the RX handler consumes the RXB we have
921 * access to it in the notification wait entry.
922 */
923 if (!list_empty(&priv->_agn.notif_waits)) {
924 struct iwl_notification_wait *w;
925
926 spin_lock(&priv->_agn.notif_wait_lock);
927 list_for_each_entry(w, &priv->_agn.notif_waits, list) {
928 if (w->cmd == pkt->hdr.cmd) {
929 w->triggered = true;
930 if (w->fn)
931 w->fn(priv, pkt);
932 }
933 }
934 spin_unlock(&priv->_agn.notif_wait_lock);
935
936 wake_up_all(&priv->_agn.notif_waitq);
937 }
938
913 /* Based on type of command response or notification, 939 /* Based on type of command response or notification,
914 * handle those that need handling via function in 940 * handle those that need handling via function in
915 * rx_handlers table. See iwl_setup_rx_handlers() */ 941 * rx_handlers table. See iwl_setup_rx_handlers() */
@@ -1387,34 +1413,42 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1387/** 1413/**
1388 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. 1414 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
1389 * 1415 *
1390 * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding 1416 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
1391 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal 1417 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
1392 * operation state. 1418 * operation state.
1393 */ 1419 */
1394bool iwl_good_ack_health(struct iwl_priv *priv, 1420bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
1395 struct iwl_rx_packet *pkt)
1396{ 1421{
1397 bool rc = true; 1422 int actual_delta, expected_delta, ba_timeout_delta;
1398 int actual_ack_cnt_delta, expected_ack_cnt_delta; 1423 struct statistics_tx *cur, *old;
1399 int ba_timeout_delta; 1424
1400 1425 if (priv->_agn.agg_tids_count)
1401 actual_ack_cnt_delta = 1426 return true;
1402 le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - 1427
1403 le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt); 1428 if (iwl_bt_statistics(priv)) {
1404 expected_ack_cnt_delta = 1429 cur = &pkt->u.stats_bt.tx;
1405 le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - 1430 old = &priv->_agn.statistics_bt.tx;
1406 le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt); 1431 } else {
1407 ba_timeout_delta = 1432 cur = &pkt->u.stats.tx;
1408 le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - 1433 old = &priv->_agn.statistics.tx;
1409 le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout); 1434 }
1410 if ((priv->_agn.agg_tids_count > 0) && 1435
1411 (expected_ack_cnt_delta > 0) && 1436 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
1412 (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) 1437 le32_to_cpu(old->actual_ack_cnt);
1413 < ACK_CNT_RATIO) && 1438 expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
1414 (ba_timeout_delta > BA_TIMEOUT_CNT)) { 1439 le32_to_cpu(old->expected_ack_cnt);
1415 IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," 1440
1416 " expected_ack_cnt = %d\n", 1441 /* Values should not be negative, but we do not trust the firmware */
1417 actual_ack_cnt_delta, expected_ack_cnt_delta); 1442 if (actual_delta <= 0 || expected_delta <= 0)
1443 return true;
1444
1445 ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
1446 le32_to_cpu(old->agg.ba_timeout);
1447
1448 if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
1449 ba_timeout_delta > BA_TIMEOUT_CNT) {
1450 IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
1451 actual_delta, expected_delta, ba_timeout_delta);
1418 1452
1419#ifdef CONFIG_IWLWIFI_DEBUGFS 1453#ifdef CONFIG_IWLWIFI_DEBUGFS
1420 /* 1454 /*
@@ -1422,20 +1456,18 @@ bool iwl_good_ack_health(struct iwl_priv *priv,
1422 * statistics aren't available. If DEBUGFS is set but 1456 * statistics aren't available. If DEBUGFS is set but
1423 * DEBUG is not, these will just compile out. 1457 * DEBUG is not, these will just compile out.
1424 */ 1458 */
1425 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", 1459 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
1426 priv->_agn.delta_statistics.tx.rx_detected_cnt); 1460 priv->_agn.delta_statistics.tx.rx_detected_cnt);
1427 IWL_DEBUG_RADIO(priv, 1461 IWL_DEBUG_RADIO(priv,
1428 "ack_or_ba_timeout_collision delta = %d\n", 1462 "ack_or_ba_timeout_collision delta %d\n",
1429 priv->_agn.delta_statistics.tx. 1463 priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
1430 ack_or_ba_timeout_collision);
1431#endif 1464#endif
1432 IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", 1465
1433 ba_timeout_delta); 1466 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
1434 if (!actual_ack_cnt_delta && 1467 return false;
1435 (ba_timeout_delta >= BA_TIMEOUT_MAX))
1436 rc = false;
1437 } 1468 }
1438 return rc; 1469
1470 return true;
1439} 1471}
1440 1472
1441 1473
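
The rewritten iwl_good_ack_health() above returns early while aggregation sessions exist, guards against nonsensical firmware counters, and only declares the radio unhealthy when the ACK ratio is low and BA timeouts pile up. The standalone sketch below covers the ratio/timeout decision only, with made-up threshold constants in place of ACK_CNT_RATIO, BA_TIMEOUT_CNT and BA_TIMEOUT_MAX.

#include <stdbool.h>
#include <stdio.h>

#define ACK_RATIO_PCT   50   /* minimum acceptable actual/expected ratio, in % */
#define BA_TIMEOUT_WARN 5    /* BA timeouts per period that trigger the check */
#define BA_TIMEOUT_MAX  16   /* BA timeouts per period that mean "firmware is stuck" */

static bool ack_health_ok(int actual_delta, int expected_delta, int ba_timeout_delta)
{
	/* Don't trust negative or zero deltas from the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	if ((actual_delta * 100 / expected_delta) < ACK_RATIO_PCT &&
	    ba_timeout_delta > BA_TIMEOUT_WARN) {
		/* Bad ACK ratio plus excessive BA timeouts: unhealthy */
		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", ack_health_ok(10, 100, 20));  /* 0: reload firmware */
	printf("%d\n", ack_health_ok(90, 100, 20));  /* 1: ratio fine */
	return 0;
}
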
@@ -2632,13 +2664,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2632 2664
2633 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2665 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2634 2666
2635 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2636 /* We had an error bringing up the hardware, so take it
2637 * all the way back down so we can try again */
2638 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2639 goto restart;
2640 }
2641
2642 /* Initialize uCode has loaded Runtime uCode ... verify inst image. 2667 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2643 * This is a paranoid check, because we would not have gotten the 2668 * This is a paranoid check, because we would not have gotten the
2644 * "runtime" alive if code weren't properly loaded. */ 2669 * "runtime" alive if code weren't properly loaded. */
@@ -2726,8 +2751,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2726 /* At this point, the NIC is initialized and operational */ 2751 /* At this point, the NIC is initialized and operational */
2727 iwl_rf_kill_ct_config(priv); 2752 iwl_rf_kill_ct_config(priv);
2728 2753
2729 iwl_leds_init(priv);
2730
2731 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2754 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2732 wake_up_interruptible(&priv->wait_command_queue); 2755 wake_up_interruptible(&priv->wait_command_queue);
2733 2756
@@ -2769,7 +2792,6 @@ static void __iwl_down(struct iwl_priv *priv)
2769 priv->cfg->bt_params->bt_init_traffic_load; 2792 priv->cfg->bt_params->bt_init_traffic_load;
2770 else 2793 else
2771 priv->bt_traffic_load = 0; 2794 priv->bt_traffic_load = 0;
2772 priv->bt_sco_active = false;
2773 priv->bt_full_concurrent = false; 2795 priv->bt_full_concurrent = false;
2774 priv->bt_ci_compliance = 0; 2796 priv->bt_ci_compliance = 0;
2775 2797
@@ -3063,8 +3085,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
3063 } 3085 }
3064 3086
3065 if (priv->start_calib) { 3087 if (priv->start_calib) {
3066 if (priv->cfg->bt_params && 3088 if (iwl_bt_statistics(priv)) {
3067 priv->cfg->bt_params->bt_statistics) {
3068 iwl_chain_noise_calibration(priv, 3089 iwl_chain_noise_calibration(priv,
3069 (void *)&priv->_agn.statistics_bt); 3090 (void *)&priv->_agn.statistics_bt);
3070 iwl_sensitivity_calibration(priv, 3091 iwl_sensitivity_calibration(priv,
@@ -3089,7 +3110,7 @@ static void iwl_bg_restart(struct work_struct *data)
3089 3110
3090 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 3111 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
3091 struct iwl_rxon_context *ctx; 3112 struct iwl_rxon_context *ctx;
3092 bool bt_sco, bt_full_concurrent; 3113 bool bt_full_concurrent;
3093 u8 bt_ci_compliance; 3114 u8 bt_ci_compliance;
3094 u8 bt_load; 3115 u8 bt_load;
3095 u8 bt_status; 3116 u8 bt_status;
@@ -3108,7 +3129,6 @@ static void iwl_bg_restart(struct work_struct *data)
3108 * re-configure the hw when we reconfigure the BT 3129 * re-configure the hw when we reconfigure the BT
3109 * command. 3130 * command.
3110 */ 3131 */
3111 bt_sco = priv->bt_sco_active;
3112 bt_full_concurrent = priv->bt_full_concurrent; 3132 bt_full_concurrent = priv->bt_full_concurrent;
3113 bt_ci_compliance = priv->bt_ci_compliance; 3133 bt_ci_compliance = priv->bt_ci_compliance;
3114 bt_load = priv->bt_traffic_load; 3134 bt_load = priv->bt_traffic_load;
@@ -3116,7 +3136,6 @@ static void iwl_bg_restart(struct work_struct *data)
3116 3136
3117 __iwl_down(priv); 3137 __iwl_down(priv);
3118 3138
3119 priv->bt_sco_active = bt_sco;
3120 priv->bt_full_concurrent = bt_full_concurrent; 3139 priv->bt_full_concurrent = bt_full_concurrent;
3121 priv->bt_ci_compliance = bt_ci_compliance; 3140 priv->bt_ci_compliance = bt_ci_compliance;
3122 priv->bt_traffic_load = bt_load; 3141 priv->bt_traffic_load = bt_load;
@@ -3178,6 +3197,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3178 IEEE80211_HW_SPECTRUM_MGMT | 3197 IEEE80211_HW_SPECTRUM_MGMT |
3179 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 3198 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
3180 3199
3200 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3201
3181 if (!priv->cfg->base_params->broken_powersave) 3202 if (!priv->cfg->base_params->broken_powersave)
3182 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 3203 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3183 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3204 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
@@ -3194,8 +3215,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3194 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; 3215 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
3195 } 3216 }
3196 3217
3218 hw->wiphy->max_remain_on_channel_duration = 1000;
3219
3197 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3220 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3198 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3221 WIPHY_FLAG_DISABLE_BEACON_HINTS |
3222 WIPHY_FLAG_IBSS_RSN;
3199 3223
3200 /* 3224 /*
3201 * For now, disable PS by default because it affects 3225 * For now, disable PS by default because it affects
@@ -3219,6 +3243,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3219 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3243 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3220 &priv->bands[IEEE80211_BAND_5GHZ]; 3244 &priv->bands[IEEE80211_BAND_5GHZ];
3221 3245
3246 iwl_leds_init(priv);
3247
3222 ret = ieee80211_register_hw(priv->hw); 3248 ret = ieee80211_register_hw(priv->hw);
3223 if (ret) { 3249 if (ret) {
3224 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 3250 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3263,7 +3289,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw)
3263 } 3289 }
3264 } 3290 }
3265 3291
3266 iwl_led_start(priv); 3292 iwlagn_led_enable(priv);
3267 3293
3268out: 3294out:
3269 priv->is_open = 1; 3295 priv->is_open = 1;
@@ -3345,6 +3371,14 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3345 return -EOPNOTSUPP; 3371 return -EOPNOTSUPP;
3346 } 3372 }
3347 3373
3374 /*
3375 * To support IBSS RSN, don't program group keys in IBSS, the
3376 * hardware will then not attempt to decrypt the frames.
3377 */
3378 if (vif->type == NL80211_IFTYPE_ADHOC &&
3379 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
3380 return -EOPNOTSUPP;
3381
3348 sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta); 3382 sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
3349 if (sta_id == IWL_INVALID_STATION) 3383 if (sta_id == IWL_INVALID_STATION)
3350 return -EINVAL; 3384 return -EINVAL;
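
The set_key hunk above implements IBSS RSN support by refusing to program group keys in ad-hoc mode, so the hardware leaves those frames for mac80211 to decrypt in software. A toy version of the decision; -95 mirrors Linux EOPNOTSUPP and the flag value is invented.

#include <stdbool.h>
#include <stdio.h>

enum iftype { IFTYPE_STATION, IFTYPE_ADHOC };
#define KEY_FLAG_PAIRWISE (1u << 0)

/* return 0 to offload the key, -95 (EOPNOTSUPP) to keep it in software */
static int hw_set_key(enum iftype type, unsigned int key_flags)
{
	if (type == IFTYPE_ADHOC && !(key_flags & KEY_FLAG_PAIRWISE))
		return -95;
	return 0;
}

int main(void)
{
	printf("%d %d\n",
	       hw_set_key(IFTYPE_ADHOC, 0),                 /* group key in IBSS: -95 */
	       hw_set_key(IFTYPE_ADHOC, KEY_FLAG_PAIRWISE)); /* pairwise: 0 */
	return 0;
}
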
@@ -3399,10 +3433,12 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3399int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 3433int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3400 struct ieee80211_vif *vif, 3434 struct ieee80211_vif *vif,
3401 enum ieee80211_ampdu_mlme_action action, 3435 enum ieee80211_ampdu_mlme_action action,
3402 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3436 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
3437 u8 buf_size)
3403{ 3438{
3404 struct iwl_priv *priv = hw->priv; 3439 struct iwl_priv *priv = hw->priv;
3405 int ret = -EINVAL; 3440 int ret = -EINVAL;
3441 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
3406 3442
3407 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 3443 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
3408 sta->addr, tid); 3444 sta->addr, tid);
@@ -3457,11 +3493,28 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3457 } 3493 }
3458 break; 3494 break;
3459 case IEEE80211_AMPDU_TX_OPERATIONAL: 3495 case IEEE80211_AMPDU_TX_OPERATIONAL:
3496 /*
3497 * If the limit is 0, then it wasn't initialised yet,
3498 * use the default. We can do that since we take the
3499 * minimum below, and we don't want to go above our
3500 * default due to hardware restrictions.
3501 */
3502 if (sta_priv->max_agg_bufsize == 0)
3503 sta_priv->max_agg_bufsize =
3504 LINK_QUAL_AGG_FRAME_LIMIT_DEF;
3505
3506 /*
3507 * Even though in theory the peer could have different
3508 * aggregation reorder buffer sizes for different sessions,
3509 * our ucode doesn't allow for that and has a global limit
3510 * for each station. Therefore, use the minimum of all the
3511 * aggregation sessions and our default value.
3512 */
3513 sta_priv->max_agg_bufsize =
3514 min(sta_priv->max_agg_bufsize, buf_size);
3515
3460 if (priv->cfg->ht_params && 3516 if (priv->cfg->ht_params &&
3461 priv->cfg->ht_params->use_rts_for_aggregation) { 3517 priv->cfg->ht_params->use_rts_for_aggregation) {
3462 struct iwl_station_priv *sta_priv =
3463 (void *) sta->drv_priv;
3464
3465 /* 3518 /*
3466 * switch to RTS/CTS if it is the prefer protection 3519 * switch to RTS/CTS if it is the prefer protection
3467 * method for HT traffic 3520 * method for HT traffic
@@ -3469,9 +3522,13 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3469 3522
3470 sta_priv->lq_sta.lq.general_params.flags |= 3523 sta_priv->lq_sta.lq.general_params.flags |=
3471 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; 3524 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3472 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3473 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3474 } 3525 }
3526
3527 sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
3528 sta_priv->max_agg_bufsize;
3529
3530 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3531 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3475 ret = 0; 3532 ret = 0;
3476 break; 3533 break;
3477 } 3534 }
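
The TX_OPERATIONAL branch above initialises the per-station aggregation limit lazily and then takes the minimum of every session's advertised reorder buffer, because the uCode supports only one limit per station. A short sketch of that clamping, with 63 standing in for LINK_QUAL_AGG_FRAME_LIMIT_DEF.

#include <stdio.h>

#define AGG_FRAME_LIMIT_DEF 63

static unsigned char clamp_agg_bufsize(unsigned char current_max,
					unsigned char peer_buf_size)
{
	if (current_max == 0)            /* not initialised yet: use the default */
		current_max = AGG_FRAME_LIMIT_DEF;
	if (peer_buf_size < current_max) /* min() over all sessions */
		current_max = peer_buf_size;
	return current_max;
}

int main(void)
{
	unsigned char max = 0;

	max = clamp_agg_bufsize(max, 32);  /* first session: peer allows 32 */
	max = clamp_agg_bufsize(max, 40);  /* second session: 40, keep 32 */
	printf("%u\n", max);               /* 32 */
	return 0;
}
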
@@ -3709,6 +3766,97 @@ done:
3709 IWL_DEBUG_MAC80211(priv, "leave\n"); 3766 IWL_DEBUG_MAC80211(priv, "leave\n");
3710} 3767}
3711 3768
3769static void iwlagn_disable_roc(struct iwl_priv *priv)
3770{
3771 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
3772 struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
3773
3774 lockdep_assert_held(&priv->mutex);
3775
3776 if (!ctx->is_active)
3777 return;
3778
3779 ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
3780 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3781 iwl_set_rxon_channel(priv, chan, ctx);
3782 iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
3783
3784 priv->_agn.hw_roc_channel = NULL;
3785
3786 iwlcore_commit_rxon(priv, ctx);
3787
3788 ctx->is_active = false;
3789}
3790
3791static void iwlagn_bg_roc_done(struct work_struct *work)
3792{
3793 struct iwl_priv *priv = container_of(work, struct iwl_priv,
3794 _agn.hw_roc_work.work);
3795
3796 mutex_lock(&priv->mutex);
3797 ieee80211_remain_on_channel_expired(priv->hw);
3798 iwlagn_disable_roc(priv);
3799 mutex_unlock(&priv->mutex);
3800}
3801
3802#ifdef CONFIG_IWL5000
3803static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3804 struct ieee80211_channel *channel,
3805 enum nl80211_channel_type channel_type,
3806 int duration)
3807{
3808 struct iwl_priv *priv = hw->priv;
3809 int err = 0;
3810
3811 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3812 return -EOPNOTSUPP;
3813
3814 if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
3815 BIT(NL80211_IFTYPE_P2P_CLIENT)))
3816 return -EOPNOTSUPP;
3817
3818 mutex_lock(&priv->mutex);
3819
3820 if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
3821 test_bit(STATUS_SCAN_HW, &priv->status)) {
3822 err = -EBUSY;
3823 goto out;
3824 }
3825
3826 priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
3827 priv->_agn.hw_roc_channel = channel;
3828 priv->_agn.hw_roc_chantype = channel_type;
3829 priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
3830 iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
3831 queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
3832 msecs_to_jiffies(duration + 20));
3833
3834 msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
3835 ieee80211_ready_on_channel(priv->hw);
3836
3837 out:
3838 mutex_unlock(&priv->mutex);
3839
3840 return err;
3841}
3842
3843static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3844{
3845 struct iwl_priv *priv = hw->priv;
3846
3847 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3848 return -EOPNOTSUPP;
3849
3850 cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
3851
3852 mutex_lock(&priv->mutex);
3853 iwlagn_disable_roc(priv);
3854 mutex_unlock(&priv->mutex);
3855
3856 return 0;
3857}
3858#endif
3859
3712/***************************************************************************** 3860/*****************************************************************************
3713 * 3861 *
3714 * driver setup and teardown 3862 * driver setup and teardown
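
The remain-on-channel handlers added above convert the mac80211 duration (milliseconds) into firmware time units, where 1 TU is 1024 microseconds, rounding up so the dwell is never shorter than requested. Minimal sketch of the conversion:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int ms_to_tu(unsigned int duration_ms)
{
	return DIV_ROUND_UP(duration_ms * 1000, 1024);
}

int main(void)
{
	printf("%u %u\n", ms_to_tu(20), ms_to_tu(500)); /* 20 -> 20 TU, 500 -> 489 TU */
	return 0;
}
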
@@ -3730,6 +3878,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3730 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); 3878 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3731 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); 3879 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
3732 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); 3880 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
3881 INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
3733 3882
3734 iwl_setup_scan_deferred_work(priv); 3883 iwl_setup_scan_deferred_work(priv);
3735 3884
@@ -3898,6 +4047,8 @@ struct ieee80211_ops iwlagn_hw_ops = {
3898 .channel_switch = iwlagn_mac_channel_switch, 4047 .channel_switch = iwlagn_mac_channel_switch,
3899 .flush = iwlagn_mac_flush, 4048 .flush = iwlagn_mac_flush,
3900 .tx_last_beacon = iwl_mac_tx_last_beacon, 4049 .tx_last_beacon = iwl_mac_tx_last_beacon,
4050 .remain_on_channel = iwl_mac_remain_on_channel,
4051 .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
3901}; 4052};
3902#endif 4053#endif
3903 4054
@@ -3905,7 +4056,7 @@ static void iwl_hw_detect(struct iwl_priv *priv)
3905{ 4056{
3906 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV); 4057 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
3907 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG); 4058 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
3908 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); 4059 priv->rev_id = priv->pci_dev->revision;
3909 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); 4060 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
3910} 4061}
3911 4062
@@ -4025,6 +4176,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4025 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE; 4176 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
4026 priv->contexts[IWL_RXON_CTX_PAN].interface_modes = 4177 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
4027 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); 4178 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
4179#ifdef CONFIG_IWL_P2P
4180 priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
4181 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
4182#endif
4028 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; 4183 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
4029 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; 4184 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
4030 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; 4185 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -4272,6 +4427,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4272 * we need to set STATUS_EXIT_PENDING bit. 4427 * we need to set STATUS_EXIT_PENDING bit.
4273 */ 4428 */
4274 set_bit(STATUS_EXIT_PENDING, &priv->status); 4429 set_bit(STATUS_EXIT_PENDING, &priv->status);
4430
4431 iwl_leds_exit(priv);
4432
4275 if (priv->mac80211_registered) { 4433 if (priv->mac80211_registered) {
4276 ieee80211_unregister_hw(priv->hw); 4434 ieee80211_unregister_hw(priv->hw);
4277 priv->mac80211_registered = 0; 4435 priv->mac80211_registered = 0;
@@ -4492,6 +4650,49 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4492 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, 4650 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
4493 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, 4651 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
4494 4652
4653/* 2x00 Series */
4654 {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
4655 {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
4656 {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
4657 {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
4658 {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
4659 {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
4660
4661/* 2x30 Series */
4662 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
4663 {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
4664 {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
4665 {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
4666 {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
4667 {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
4668
4669/* 6x35 Series */
4670 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
4671 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
4672 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
4673 {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
4674 {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
4675 {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
4676 {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
4677 {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
4678 {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
4679
4680/* 200 Series */
4681 {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
4682 {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
4683 {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
4684 {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
4685 {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
4686 {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
4687
4688/* 230 Series */
4689 {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
4690 {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
4691 {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
4692 {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
4693 {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
4694 {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
4695
4495#endif /* CONFIG_IWL5000 */ 4696#endif /* CONFIG_IWL5000 */
4496 4697
4497 {0} 4698 {0}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index da303585f80..d00e1ea50a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -96,6 +96,17 @@ extern struct iwl_cfg iwl100_bgn_cfg;
96extern struct iwl_cfg iwl100_bg_cfg; 96extern struct iwl_cfg iwl100_bg_cfg;
97extern struct iwl_cfg iwl130_bgn_cfg; 97extern struct iwl_cfg iwl130_bgn_cfg;
98extern struct iwl_cfg iwl130_bg_cfg; 98extern struct iwl_cfg iwl130_bg_cfg;
99extern struct iwl_cfg iwl2000_2bgn_cfg;
100extern struct iwl_cfg iwl2000_2bg_cfg;
101extern struct iwl_cfg iwl2030_2bgn_cfg;
102extern struct iwl_cfg iwl2030_2bg_cfg;
103extern struct iwl_cfg iwl6035_2agn_cfg;
104extern struct iwl_cfg iwl6035_2abg_cfg;
105extern struct iwl_cfg iwl6035_2bg_cfg;
106extern struct iwl_cfg iwl200_bg_cfg;
107extern struct iwl_cfg iwl200_bgn_cfg;
108extern struct iwl_cfg iwl230_bg_cfg;
109extern struct iwl_cfg iwl230_bgn_cfg;
99 110
100extern struct iwl_mod_params iwlagn_mod_params; 111extern struct iwl_mod_params iwlagn_mod_params;
101extern struct iwl_hcmd_ops iwlagn_hcmd; 112extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -185,7 +196,6 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
185 struct iwl_rx_mem_buffer *rxb); 196 struct iwl_rx_mem_buffer *rxb);
186void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, 197void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
187 struct iwl_rx_mem_buffer *rxb); 198 struct iwl_rx_mem_buffer *rxb);
188void iwl_rx_handle(struct iwl_priv *priv);
189 199
190/* tx */ 200/* tx */
191void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 201void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@@ -330,6 +340,21 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
330int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 340int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
331void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); 341void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
332 342
343/* notification wait support */
344void __acquires(wait_entry)
345iwlagn_init_notification_wait(struct iwl_priv *priv,
346 struct iwl_notification_wait *wait_entry,
347 void (*fn)(struct iwl_priv *priv,
348 struct iwl_rx_packet *pkt),
349 u8 cmd);
350signed long __releases(wait_entry)
351iwlagn_wait_notification(struct iwl_priv *priv,
352 struct iwl_notification_wait *wait_entry,
353 unsigned long timeout);
354void __releases(wait_entry)
355iwlagn_remove_notification(struct iwl_priv *priv,
356 struct iwl_notification_wait *wait_entry);
357
333/* mac80211 handlers (for 4965) */ 358/* mac80211 handlers (for 4965) */
334int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 359int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
335int iwlagn_mac_start(struct ieee80211_hw *hw); 360int iwlagn_mac_start(struct ieee80211_hw *hw);
@@ -349,7 +374,8 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
349int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 374int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
350 struct ieee80211_vif *vif, 375 struct ieee80211_vif *vif,
351 enum ieee80211_ampdu_mlme_action action, 376 enum ieee80211_ampdu_mlme_action action,
352 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 377 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
378 u8 buf_size);
353int iwlagn_mac_sta_add(struct ieee80211_hw *hw, 379int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
354 struct ieee80211_vif *vif, 380 struct ieee80211_vif *vif,
355 struct ieee80211_sta *sta); 381 struct ieee80211_sta *sta);
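
The notification-wait prototypes above carry __acquires()/__releases() markers, which are sparse annotations telling the static checker that the wait entry is registered on entry or released on exit; in a normal build they compile away. A standalone illustration under that assumption; the macro bodies mirror the usual kernel pattern rather than being copied from this tree.

#include <stdio.h>

#ifdef __CHECKER__
# define __acquires(x) __attribute__((context(x, 0, 1)))
# define __releases(x) __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)   /* no-op outside sparse */
# define __releases(x)   /* no-op outside sparse */
#endif

static int registered;

static void register_wait(void) __acquires(registered);
static void finish_wait(void) __releases(registered);

static void register_wait(void) { registered = 1; }
static void finish_wait(void)   { registered = 0; }

int main(void)
{
	register_wait();
	finish_wait();
	printf("balanced\n");
	return 0;
}
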
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f893d4a6aa8..0a1d4aeb36a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -178,7 +178,6 @@ enum {
178 REPLY_BT_COEX_PRIO_TABLE = 0xcc, 178 REPLY_BT_COEX_PRIO_TABLE = 0xcc,
179 REPLY_BT_COEX_PROT_ENV = 0xcd, 179 REPLY_BT_COEX_PROT_ENV = 0xcd,
180 REPLY_BT_COEX_PROFILE_NOTIF = 0xce, 180 REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
181 REPLY_BT_COEX_SCO = 0xcf,
182 181
183 /* PAN commands */ 182 /* PAN commands */
184 REPLY_WIPAN_PARAMS = 0xb2, 183 REPLY_WIPAN_PARAMS = 0xb2,
@@ -189,6 +188,7 @@ enum {
189 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */ 188 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
190 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9, 189 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
191 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc, 190 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
191 REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
192 192
193 REPLY_MAX = 0xff 193 REPLY_MAX = 0xff
194}; 194};
@@ -3082,6 +3082,13 @@ struct iwl4965_beacon_notif {
3082 __le32 ibss_mgr_status; 3082 __le32 ibss_mgr_status;
3083} __packed; 3083} __packed;
3084 3084
3085struct iwlagn_beacon_notif {
3086 struct iwlagn_tx_resp beacon_notify_hdr;
3087 __le32 low_tsf;
3088 __le32 high_tsf;
3089 __le32 ibss_mgr_status;
3090} __packed;
3091
3085/* 3092/*
3086 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 3093 * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
3087 */ 3094 */
@@ -4369,6 +4376,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
4369 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification) 4376 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
4370 */ 4377 */
4371 4378
4379/*
4380 * Minimum slot time in TU
4381 */
4382#define IWL_MIN_SLOT_TIME 20
4383
4372/** 4384/**
4373 * struct iwl_wipan_slot 4385 * struct iwl_wipan_slot
4374 * @width: Time in TU 4386 * @width: Time in TU
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index efbde1f1a8b..4ad89389a0a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -219,15 +219,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
219 if (!is_channel_valid(ch)) 219 if (!is_channel_valid(ch))
220 continue; 220 continue;
221 221
222 if (is_channel_a_band(ch)) 222 sband = &priv->bands[ch->band];
223 sband = &priv->bands[IEEE80211_BAND_5GHZ];
224 else
225 sband = &priv->bands[IEEE80211_BAND_2GHZ];
226 223
227 geo_ch = &sband->channels[sband->n_channels++]; 224 geo_ch = &sband->channels[sband->n_channels++];
228 225
229 geo_ch->center_freq = 226 geo_ch->center_freq =
230 ieee80211_channel_to_frequency(ch->channel); 227 ieee80211_channel_to_frequency(ch->channel, ch->band);
231 geo_ch->max_power = ch->max_power_avg; 228 geo_ch->max_power = ch->max_power_avg;
232 geo_ch->max_antenna_gain = 0xff; 229 geo_ch->max_antenna_gain = 0xff;
233 geo_ch->hw_value = ch->channel; 230 geo_ch->hw_value = ch->channel;
@@ -1161,6 +1158,8 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1161{ 1158{
1162 int ret; 1159 int ret;
1163 s8 prev_tx_power; 1160 s8 prev_tx_power;
1161 bool defer;
1162 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1164 1163
1165 lockdep_assert_held(&priv->mutex); 1164 lockdep_assert_held(&priv->mutex);
1166 1165
@@ -1188,10 +1187,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1188 if (!iwl_is_ready_rf(priv)) 1187 if (!iwl_is_ready_rf(priv))
1189 return -EIO; 1188 return -EIO;
1190 1189
1191 /* scan complete use tx_power_next, need to be updated */ 1190 /* scan complete and commit_rxon use tx_power_next value,
 1191 * it always needs to be updated with the newest request */
1192 priv->tx_power_next = tx_power; 1192 priv->tx_power_next = tx_power;
1193 if (test_bit(STATUS_SCANNING, &priv->status) && !force) { 1193
1194 IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n"); 1194 /* do not set tx power when scanning or channel changing */
1195 defer = test_bit(STATUS_SCANNING, &priv->status) ||
1196 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1197 if (defer && !force) {
1198 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
1195 return 0; 1199 return 0;
1196 } 1200 }
1197 1201
@@ -1403,9 +1407,10 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1403 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1407 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1404 struct iwl_rxon_context *tmp, *ctx = NULL; 1408 struct iwl_rxon_context *tmp, *ctx = NULL;
1405 int err; 1409 int err;
1410 enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
1406 1411
1407 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", 1412 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1408 vif->type, vif->addr); 1413 viftype, vif->addr);
1409 1414
1410 mutex_lock(&priv->mutex); 1415 mutex_lock(&priv->mutex);
1411 1416
@@ -1429,7 +1434,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1429 continue; 1434 continue;
1430 } 1435 }
1431 1436
1432 if (!(possible_modes & BIT(vif->type))) 1437 if (!(possible_modes & BIT(viftype)))
1433 continue; 1438 continue;
1434 1439
1435 /* have maybe usable context w/o interface */ 1440 /* have maybe usable context w/o interface */
@@ -1675,7 +1680,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv)
1675{ 1680{
1676 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); 1681 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1677 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); 1682 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1678 priv->led_tpt = 0;
1679} 1683}
1680 1684
1681/* 1685/*
@@ -1768,7 +1772,6 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1768 stats->data_cnt++; 1772 stats->data_cnt++;
1769 stats->data_bytes += len; 1773 stats->data_bytes += len;
1770 } 1774 }
1771 iwl_leds_background(priv);
1772} 1775}
1773EXPORT_SYMBOL(iwl_update_stats); 1776EXPORT_SYMBOL(iwl_update_stats);
1774#endif 1777#endif
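
Several hunks in this patch (iwl-core.c above, and the iwmc3200wifi and libertas drivers further down) add a band argument to ieee80211_channel_to_frequency(). A minimal reference sketch of the cfg80211 helper these call sites assume; channel numbers are not unique across bands, so the band must now be passed explicitly:

/* assumed prototype behind the updated call sites in this patch */
int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);

/* example: the iwl-core.c hunk above resolves the band from the channel info */
geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel, ch->band);
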
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index a3474376fdb..e0ec17079dc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -227,8 +227,6 @@ struct iwl_lib_ops {
227 227
228struct iwl_led_ops { 228struct iwl_led_ops {
229 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); 229 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
230 int (*on)(struct iwl_priv *priv);
231 int (*off)(struct iwl_priv *priv);
232}; 230};
233 231
234/* NIC specific ops */ 232/* NIC specific ops */
@@ -307,7 +305,6 @@ struct iwl_base_params {
307 u16 led_compensation; 305 u16 led_compensation;
308 const bool broken_powersave; 306 const bool broken_powersave;
309 int chain_noise_num_beacons; 307 int chain_noise_num_beacons;
310 const bool supports_idle;
311 bool adv_thermal_throttle; 308 bool adv_thermal_throttle;
312 bool support_ct_kill_exit; 309 bool support_ct_kill_exit;
313 const bool support_wimax_coexist; 310 const bool support_wimax_coexist;
@@ -366,6 +363,7 @@ struct iwl_ht_params {
366 * @adv_pm: advance power management 363 * @adv_pm: advance power management
367 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 364 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
368 * @internal_wimax_coex: internal wifi/wimax combo device 365 * @internal_wimax_coex: internal wifi/wimax combo device
366 * @iq_invert: I/Q inversion
369 * 367 *
370 * We enable the driver to be backward compatible wrt API version. The 368 * We enable the driver to be backward compatible wrt API version. The
371 * driver specifies which APIs it supports (with @ucode_api_max being the 369 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -415,6 +413,7 @@ struct iwl_cfg {
415 const bool adv_pm; 413 const bool adv_pm;
416 const bool rx_with_siso_diversity; 414 const bool rx_with_siso_diversity;
417 const bool internal_wimax_coex; 415 const bool internal_wimax_coex;
416 const bool iq_invert;
418}; 417};
419 418
420/*************************** 419/***************************
@@ -494,18 +493,6 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
494static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx, 493static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
495 __le16 fc, u16 len) 494 __le16 fc, u16 len)
496{ 495{
497 struct traffic_stats *stats;
498
499 if (is_tx)
500 stats = &priv->tx_stats;
501 else
502 stats = &priv->rx_stats;
503
504 if (ieee80211_is_data(fc)) {
505 /* data */
506 stats->data_bytes += len;
507 }
508 iwl_leds_background(priv);
509} 496}
510#endif 497#endif
511/***************************************************** 498/*****************************************************
@@ -755,6 +742,17 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
755 return priv->hw->wiphy->bands[band]; 742 return priv->hw->wiphy->bands[band];
756} 743}
757 744
745static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
746{
747 return priv->cfg->bt_params &&
748 priv->cfg->bt_params->advanced_bt_coexist;
749}
750
751static inline bool iwl_bt_statistics(struct iwl_priv *priv)
752{
753 return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
754}
755
758extern bool bt_coex_active; 756extern bool bt_coex_active;
759extern bool bt_siso_mode; 757extern bool bt_siso_mode;
760 758
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b80bf7dff55..f52bc040bcb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -290,7 +290,7 @@
290 290
291 291
292/* HW REV */ 292/* HW REV */
293#define CSR_HW_REV_TYPE_MSK (0x00000F0) 293#define CSR_HW_REV_TYPE_MSK (0x00001F0)
294#define CSR_HW_REV_TYPE_3945 (0x00000D0) 294#define CSR_HW_REV_TYPE_3945 (0x00000D0)
295#define CSR_HW_REV_TYPE_4965 (0x0000000) 295#define CSR_HW_REV_TYPE_4965 (0x0000000)
296#define CSR_HW_REV_TYPE_5300 (0x0000020) 296#define CSR_HW_REV_TYPE_5300 (0x0000020)
@@ -300,9 +300,15 @@
300#define CSR_HW_REV_TYPE_1000 (0x0000060) 300#define CSR_HW_REV_TYPE_1000 (0x0000060)
301#define CSR_HW_REV_TYPE_6x00 (0x0000070) 301#define CSR_HW_REV_TYPE_6x00 (0x0000070)
302#define CSR_HW_REV_TYPE_6x50 (0x0000080) 302#define CSR_HW_REV_TYPE_6x50 (0x0000080)
303#define CSR_HW_REV_TYPE_6x50g2 (0x0000084) 303#define CSR_HW_REV_TYPE_6150 (0x0000084)
304#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0) 304#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
305#define CSR_HW_REV_TYPE_NONE (0x00000F0) 305#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
306#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
307#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
308#define CSR_HW_REV_TYPE_2x00 (0x0000100)
309#define CSR_HW_REV_TYPE_200 (0x0000110)
310#define CSR_HW_REV_TYPE_230 (0x0000120)
311#define CSR_HW_REV_TYPE_NONE (0x00001F0)
306 312
307/* EEPROM REG */ 313/* EEPROM REG */
308#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 314#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
@@ -376,6 +382,8 @@
376#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) 382#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
377#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) 383#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
378 384
385#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080)
386
379/* GIO Chicken Bits (PCI Express bus link power management) */ 387/* GIO Chicken Bits (PCI Express bus link power management) */
380#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 388#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
381#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) 389#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 6fe80b5e7a1..bc7a965c18f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
207 return ret; 207 return ret;
208} 208}
209 209
210#define BYTE1_MASK 0x000000ff;
211#define BYTE2_MASK 0x0000ffff;
212#define BYTE3_MASK 0x00ffffff;
213static ssize_t iwl_dbgfs_sram_read(struct file *file, 210static ssize_t iwl_dbgfs_sram_read(struct file *file,
214 char __user *user_buf, 211 char __user *user_buf,
215 size_t count, loff_t *ppos) 212 size_t count, loff_t *ppos)
216{ 213{
217 u32 val; 214 u32 val = 0;
218 char *buf; 215 char *buf;
219 ssize_t ret; 216 ssize_t ret;
220 int i; 217 int i = 0;
218 bool device_format = false;
219 int offset = 0;
220 int len = 0;
221 int pos = 0; 221 int pos = 0;
222 int sram;
222 struct iwl_priv *priv = file->private_data; 223 struct iwl_priv *priv = file->private_data;
223 size_t bufsz; 224 size_t bufsz;
224 225
@@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
230 else 231 else
231 priv->dbgfs_sram_len = priv->ucode_data.len; 232 priv->dbgfs_sram_len = priv->ucode_data.len;
232 } 233 }
233 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; 234 len = priv->dbgfs_sram_len;
235
236 if (len == -4) {
237 device_format = true;
238 len = 4;
239 }
240
241 bufsz = 50 + len * 4;
234 buf = kmalloc(bufsz, GFP_KERNEL); 242 buf = kmalloc(bufsz, GFP_KERNEL);
235 if (!buf) 243 if (!buf)
236 return -ENOMEM; 244 return -ENOMEM;
245
237 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", 246 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
238 priv->dbgfs_sram_len); 247 len);
239 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", 248 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
240 priv->dbgfs_sram_offset); 249 priv->dbgfs_sram_offset);
241 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { 250
242 val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \ 251 /* adjust sram address since reads are only on even u32 boundaries */
243 priv->dbgfs_sram_len - i); 252 offset = priv->dbgfs_sram_offset & 0x3;
244 if (i < 4) { 253 sram = priv->dbgfs_sram_offset & ~0x3;
245 switch (i) { 254
246 case 1: 255 /* read the first u32 from sram */
247 val &= BYTE1_MASK; 256 val = iwl_read_targ_mem(priv, sram);
248 break; 257
249 case 2: 258 for (; len; len--) {
250 val &= BYTE2_MASK; 259 /* put the address at the start of every line */
251 break; 260 if (i == 0)
252 case 3: 261 pos += scnprintf(buf + pos, bufsz - pos,
253 val &= BYTE3_MASK; 262 "%08X: ", sram + offset);
254 break; 263
255 } 264 if (device_format)
265 pos += scnprintf(buf + pos, bufsz - pos,
266 "%02x", (val >> (8 * (3 - offset))) & 0xff);
267 else
268 pos += scnprintf(buf + pos, bufsz - pos,
269 "%02x ", (val >> (8 * offset)) & 0xff);
270
271 /* if all bytes processed, read the next u32 from sram */
272 if (++offset == 4) {
273 sram += 4;
274 offset = 0;
275 val = iwl_read_targ_mem(priv, sram);
256 } 276 }
257 if (!(i % 16)) 277
278 /* put in extra spaces and split lines for human readability */
279 if (++i == 16) {
280 i = 0;
258 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 281 pos += scnprintf(buf + pos, bufsz - pos, "\n");
259 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); 282 } else if (!(i & 7)) {
283 pos += scnprintf(buf + pos, bufsz - pos, " ");
284 } else if (!(i & 3)) {
285 pos += scnprintf(buf + pos, bufsz - pos, " ");
286 }
260 } 287 }
261 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 288 if (i)
289 pos += scnprintf(buf + pos, bufsz - pos, "\n");
262 290
263 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 291 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
264 kfree(buf); 292 kfree(buf);
@@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
282 if (sscanf(buf, "%x,%x", &offset, &len) == 2) { 310 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
283 priv->dbgfs_sram_offset = offset; 311 priv->dbgfs_sram_offset = offset;
284 priv->dbgfs_sram_len = len; 312 priv->dbgfs_sram_len = len;
313 } else if (sscanf(buf, "%x", &offset) == 1) {
314 priv->dbgfs_sram_offset = offset;
315 priv->dbgfs_sram_len = -4;
285 } else { 316 } else {
286 priv->dbgfs_sram_offset = 0; 317 priv->dbgfs_sram_offset = 0;
287 priv->dbgfs_sram_len = 0; 318 priv->dbgfs_sram_len = 0;
@@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
668 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 699 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
669} 700}
670 701
671static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
672 size_t count, loff_t *ppos)
673{
674 struct iwl_priv *priv = file->private_data;
675 int pos = 0;
676 char buf[256];
677 const size_t bufsz = sizeof(buf);
678
679 pos += scnprintf(buf + pos, bufsz - pos,
680 "allow blinking: %s\n",
681 (priv->allow_blinking) ? "True" : "False");
682 if (priv->allow_blinking) {
683 pos += scnprintf(buf + pos, bufsz - pos,
684 "Led blinking rate: %u\n",
685 priv->last_blink_rate);
686 pos += scnprintf(buf + pos, bufsz - pos,
687 "Last blink time: %lu\n",
688 priv->last_blink_time);
689 }
690
691 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
692}
693
694static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, 702static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
695 char __user *user_buf, 703 char __user *user_buf,
696 size_t count, loff_t *ppos) 704 size_t count, loff_t *ppos)
@@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels);
856DEBUGFS_READ_FILE_OPS(status); 864DEBUGFS_READ_FILE_OPS(status);
857DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 865DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
858DEBUGFS_READ_FILE_OPS(qos); 866DEBUGFS_READ_FILE_OPS(qos);
859DEBUGFS_READ_FILE_OPS(led);
860DEBUGFS_READ_FILE_OPS(thermal_throttling); 867DEBUGFS_READ_FILE_OPS(thermal_throttling);
861DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); 868DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
862DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); 869DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -1580,10 +1587,9 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1580 "last traffic notif: %d\n", 1587 "last traffic notif: %d\n",
1581 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load); 1588 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
1582 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " 1589 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
1583 "sco_active: %d, kill_ack_mask: %x, " 1590 "kill_ack_mask: %x, kill_cts_mask: %x\n",
1584 "kill_cts_mask: %x\n", 1591 priv->bt_ch_announce, priv->kill_ack_mask,
1585 priv->bt_ch_announce, priv->bt_sco_active, 1592 priv->kill_cts_mask);
1586 priv->kill_ack_mask, priv->kill_cts_mask);
1587 1593
1588 pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: "); 1594 pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
1589 switch (priv->bt_traffic_load) { 1595 switch (priv->bt_traffic_load) {
@@ -1725,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1725 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); 1731 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1726 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); 1732 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1727 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); 1733 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1728 DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
1729 if (!priv->cfg->base_params->broken_powersave) { 1734 if (!priv->cfg->base_params->broken_powersave) {
1730 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, 1735 DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
1731 S_IWUSR | S_IRUSR); 1736 S_IWUSR | S_IRUSR);
@@ -1759,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1759 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 1764 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1760 if (priv->cfg->base_params->ucode_tracing) 1765 if (priv->cfg->base_params->ucode_tracing)
1761 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); 1766 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1762 if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics) 1767 if (iwl_bt_statistics(priv))
1763 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); 1768 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
1764 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); 1769 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
1765 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 1770 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1766 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 1771 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1767 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); 1772 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1768 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) 1773 if (iwl_advanced_bt_coexist(priv))
1769 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 1774 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
1770 if (priv->cfg->base_params->sensitivity_calib_by_driver) 1775 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1771 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, 1776 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 8dda67850af..ecfbef40278 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -34,6 +34,8 @@
34 34
35#include <linux/pci.h> /* for struct pci_device_id */ 35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/wait.h>
38#include <linux/leds.h>
37#include <net/ieee80211_radiotap.h> 39#include <net/ieee80211_radiotap.h>
38 40
39#include "iwl-eeprom.h" 41#include "iwl-eeprom.h"
@@ -136,7 +138,7 @@ struct iwl_queue {
136 * space more than this */ 138 * space more than this */
137 int high_mark; /* high watermark, stop queue if free 139 int high_mark; /* high watermark, stop queue if free
138 * space less than this */ 140 * space less than this */
139} __packed; 141};
140 142
141/* One for each TFD */ 143/* One for each TFD */
142struct iwl_tx_info { 144struct iwl_tx_info {
@@ -507,6 +509,7 @@ struct iwl_station_priv {
507 atomic_t pending_frames; 509 atomic_t pending_frames;
508 bool client; 510 bool client;
509 bool asleep; 511 bool asleep;
512 u8 max_agg_bufsize;
510}; 513};
511 514
512/** 515/**
@@ -995,7 +998,6 @@ struct reply_agg_tx_error_statistics {
995 u32 unknown; 998 u32 unknown;
996}; 999};
997 1000
998#ifdef CONFIG_IWLWIFI_DEBUGFS
999/* management statistics */ 1001/* management statistics */
1000enum iwl_mgmt_stats { 1002enum iwl_mgmt_stats {
1001 MANAGEMENT_ASSOC_REQ = 0, 1003 MANAGEMENT_ASSOC_REQ = 0,
@@ -1026,16 +1028,13 @@ enum iwl_ctrl_stats {
1026}; 1028};
1027 1029
1028struct traffic_stats { 1030struct traffic_stats {
1031#ifdef CONFIG_IWLWIFI_DEBUGFS
1029 u32 mgmt[MANAGEMENT_MAX]; 1032 u32 mgmt[MANAGEMENT_MAX];
1030 u32 ctrl[CONTROL_MAX]; 1033 u32 ctrl[CONTROL_MAX];
1031 u32 data_cnt; 1034 u32 data_cnt;
1032 u64 data_bytes; 1035 u64 data_bytes;
1033};
1034#else
1035struct traffic_stats {
1036 u64 data_bytes;
1037};
1038#endif 1036#endif
1037};
1039 1038
1040/* 1039/*
1041 * iwl_switch_rxon: "channel switch" structure 1040 * iwl_switch_rxon: "channel switch" structure
@@ -1139,6 +1138,33 @@ struct iwl_force_reset {
1139 */ 1138 */
1140#define IWLAGN_EXT_BEACON_TIME_POS 22 1139#define IWLAGN_EXT_BEACON_TIME_POS 22
1141 1140
1141/**
1142 * struct iwl_notification_wait - notification wait entry
1143 * @list: list head for global list
1144 * @fn: function called with the notification
1145 * @cmd: command ID
1146 *
 1147 * This structure is not used directly; to wait for a
 1148 * notification, declare it on the stack and call
 1149 * iwlagn_init_notification_wait() with appropriate
 1150 * parameters. Then do whatever will cause the ucode
 1151 * to notify the driver; to wait for that notification,
 1152 * call iwlagn_wait_notification().
1153 *
1154 * Each notification is one-shot. If at some point we
1155 * need to support multi-shot notifications (which
1156 * can't be allocated on the stack) we need to modify
1157 * the code for them.
1158 */
1159struct iwl_notification_wait {
1160 struct list_head list;
1161
1162 void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt);
1163
1164 u8 cmd;
1165 bool triggered;
1166};
1167
1142enum iwl_rxon_context_id { 1168enum iwl_rxon_context_id {
1143 IWL_RXON_CTX_BSS, 1169 IWL_RXON_CTX_BSS,
1144 IWL_RXON_CTX_PAN, 1170 IWL_RXON_CTX_PAN,
@@ -1310,11 +1336,6 @@ struct iwl_priv {
1310 struct iwl_init_alive_resp card_alive_init; 1336 struct iwl_init_alive_resp card_alive_init;
1311 struct iwl_alive_resp card_alive; 1337 struct iwl_alive_resp card_alive;
1312 1338
1313 unsigned long last_blink_time;
1314 u8 last_blink_rate;
1315 u8 allow_blinking;
1316 u64 led_tpt;
1317
1318 u16 active_rate; 1339 u16 active_rate;
1319 1340
1320 u8 start_calib; 1341 u8 start_calib;
@@ -1463,6 +1484,17 @@ struct iwl_priv {
1463 struct iwl_bt_notif_statistics delta_statistics_bt; 1484 struct iwl_bt_notif_statistics delta_statistics_bt;
1464 struct iwl_bt_notif_statistics max_delta_bt; 1485 struct iwl_bt_notif_statistics max_delta_bt;
1465#endif 1486#endif
1487
1488 /* notification wait support */
1489 struct list_head notif_waits;
1490 spinlock_t notif_wait_lock;
1491 wait_queue_head_t notif_waitq;
1492
1493 /* remain-on-channel offload support */
1494 struct ieee80211_channel *hw_roc_channel;
1495 struct delayed_work hw_roc_work;
1496 enum nl80211_channel_type hw_roc_chantype;
1497 int hw_roc_duration;
1466 } _agn; 1498 } _agn;
1467#endif 1499#endif
1468 }; 1500 };
@@ -1472,7 +1504,6 @@ struct iwl_priv {
1472 u8 bt_status; 1504 u8 bt_status;
1473 u8 bt_traffic_load, last_bt_traffic_load; 1505 u8 bt_traffic_load, last_bt_traffic_load;
1474 bool bt_ch_announce; 1506 bool bt_ch_announce;
1475 bool bt_sco_active;
1476 bool bt_full_concurrent; 1507 bool bt_full_concurrent;
1477 bool bt_ant_couple_ok; 1508 bool bt_ant_couple_ok;
1478 __le32 kill_ack_mask; 1509 __le32 kill_ack_mask;
@@ -1547,6 +1578,10 @@ struct iwl_priv {
1547 bool hw_ready; 1578 bool hw_ready;
1548 1579
1549 struct iwl_event_log event_log; 1580 struct iwl_event_log event_log;
1581
1582 struct led_classdev led;
1583 unsigned long blink_on, blink_off;
1584 bool led_registered;
1550}; /*iwl_priv */ 1585}; /*iwl_priv */
1551 1586
1552static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1587static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
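
The kerneldoc added above for struct iwl_notification_wait describes a declare-on-stack, init, trigger, wait pattern. A minimal caller sketch, assuming the helper signatures declared earlier in this patch; the command ID, callback, and timeout are only illustrative:

/* hypothetical caller of the one-shot notification-wait helpers */
static void iwlagn_deactivation_done(struct iwl_priv *priv,
				     struct iwl_rx_packet *pkt)
{
	/* arrival of the packet is the event itself; nothing to parse */
}

static int iwlagn_wait_for_deactivation(struct iwl_priv *priv)
{
	struct iwl_notification_wait wait;	/* one-shot, lives on the stack */

	/* arm the wait before triggering the ucode so the reply cannot race us */
	iwlagn_init_notification_wait(priv, &wait, iwlagn_deactivation_done,
				      REPLY_WIPAN_DEACTIVATION_COMPLETE);

	/*
	 * ... send the host command that makes the ucode emit the
	 * notification; on failure, call iwlagn_remove_notification()
	 * instead of waiting ...
	 */

	/* releases the wait entry; assumed to return 0 on timeout */
	if (!iwlagn_wait_notification(priv, &wait, HZ / 2))
		return -ETIMEDOUT;
	return 0;
}
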
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 9e6f31355ee..98aa8af0119 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -247,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr {
247#define EEPROM_6050_TX_POWER_VERSION (4) 247#define EEPROM_6050_TX_POWER_VERSION (4)
248#define EEPROM_6050_EEPROM_VERSION (0x532) 248#define EEPROM_6050_EEPROM_VERSION (0x532)
249 249
250/* 6x50g2 Specific */ 250/* 6150 Specific */
251#define EEPROM_6050G2_TX_POWER_VERSION (6) 251#define EEPROM_6150_TX_POWER_VERSION (6)
252#define EEPROM_6050G2_EEPROM_VERSION (0x553) 252#define EEPROM_6150_EEPROM_VERSION (0x553)
253
254/* 6x05 Specific */
255#define EEPROM_6005_TX_POWER_VERSION (6)
256#define EEPROM_6005_EEPROM_VERSION (0x709)
257
258/* 6x30 Specific */
259#define EEPROM_6030_TX_POWER_VERSION (6)
260#define EEPROM_6030_EEPROM_VERSION (0x709)
261
262/* 2x00 Specific */
263#define EEPROM_2000_TX_POWER_VERSION (6)
264#define EEPROM_2000_EEPROM_VERSION (0x805)
265
266/* 6x35 Specific */
267#define EEPROM_6035_TX_POWER_VERSION (6)
268#define EEPROM_6035_EEPROM_VERSION (0x753)
253 269
254/* 6x00g2 Specific */
255#define EEPROM_6000G2_TX_POWER_VERSION (6)
256#define EEPROM_6000G2_EEPROM_VERSION (0x709)
257 270
258/* OTP */ 271/* OTP */
259/* lower blocks contain EEPROM image and calibration data */ 272/* lower blocks contain EEPROM image and calibration data */
@@ -264,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr {
264#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ 277#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
265#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ 278#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
266#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ 279#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
280#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
267 281
268/* 2.4 GHz */ 282/* 2.4 GHz */
269extern const u8 iwl_eeprom_band_1[14]; 283extern const u8 iwl_eeprom_band_1[14];
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index c373b53babe..e4b953d7b7b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -108,6 +108,7 @@ const char *get_cmd_string(u8 cmd)
108 IWL_CMD(REPLY_WIPAN_WEPKEY); 108 IWL_CMD(REPLY_WIPAN_WEPKEY);
109 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); 109 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
110 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); 110 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
111 IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
111 default: 112 default:
112 return "UNKNOWN"; 113 return "UNKNOWN";
113 114
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46ccdf406e8..074ad227522 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -48,31 +48,19 @@ module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "0=system default, " 48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "1=On(RF On)/Off(RF Off), 2=blinking"); 49 "1=On(RF On)/Off(RF Off), 2=blinking");
50 50
51static const struct { 51static const struct ieee80211_tpt_blink iwl_blink[] = {
52 u16 tpt; /* Mb/s */ 52 { .throughput = 0 * 1024 - 1, .blink_time = 334 },
53 u8 on_time; 53 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
54 u8 off_time; 54 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
55} blink_tbl[] = 55 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
56{ 56 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
57 {300, 25, 25}, 57 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
58 {200, 40, 40}, 58 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
59 {100, 55, 55}, 59 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
60 {70, 65, 65}, 60 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
61 {50, 75, 75}, 61 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
62 {20, 85, 85},
63 {10, 95, 95},
64 {5, 110, 110},
65 {1, 130, 130},
66 {0, 167, 167},
67 /* SOLID_ON */
68 {-1, IWL_LED_SOLID, 0}
69}; 62};
70 63
71#define IWL_1MB_RATE (128 * 1024)
72#define IWL_LED_THRESHOLD (16)
73#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
74#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
75
76/* 64/*
77 * Adjust led blink rate to compensate on a MAC Clock difference on every HW 65 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
78 * Led blink rate analysis showed an average deviation of 0% on 3945, 66 * Led blink rate analysis showed an average deviation of 0% on 3945,
@@ -97,133 +85,104 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
97} 85}
98 86
99/* Set led pattern command */ 87/* Set led pattern command */
100static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx) 88static int iwl_led_cmd(struct iwl_priv *priv,
89 unsigned long on,
90 unsigned long off)
101{ 91{
102 struct iwl_led_cmd led_cmd = { 92 struct iwl_led_cmd led_cmd = {
103 .id = IWL_LED_LINK, 93 .id = IWL_LED_LINK,
104 .interval = IWL_DEF_LED_INTRVL 94 .interval = IWL_DEF_LED_INTRVL
105 }; 95 };
96 int ret;
106 97
107 BUG_ON(idx > IWL_MAX_BLINK_TBL); 98 if (!test_bit(STATUS_READY, &priv->status))
99 return -EBUSY;
108 100
109 IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n", 101 if (priv->blink_on == on && priv->blink_off == off)
102 return 0;
103
104 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
110 priv->cfg->base_params->led_compensation); 105 priv->cfg->base_params->led_compensation);
111 led_cmd.on = 106 led_cmd.on = iwl_blink_compensation(priv, on,
112 iwl_blink_compensation(priv, blink_tbl[idx].on_time,
113 priv->cfg->base_params->led_compensation); 107 priv->cfg->base_params->led_compensation);
114 led_cmd.off = 108 led_cmd.off = iwl_blink_compensation(priv, off,
115 iwl_blink_compensation(priv, blink_tbl[idx].off_time,
116 priv->cfg->base_params->led_compensation); 109 priv->cfg->base_params->led_compensation);
117 110
118 return priv->cfg->ops->led->cmd(priv, &led_cmd); 111 ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
112 if (!ret) {
113 priv->blink_on = on;
114 priv->blink_off = off;
115 }
116 return ret;
119} 117}
120 118
121int iwl_led_start(struct iwl_priv *priv) 119static void iwl_led_brightness_set(struct led_classdev *led_cdev,
120 enum led_brightness brightness)
122{ 121{
123 return priv->cfg->ops->led->on(priv); 122 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
124} 123 unsigned long on = 0;
125EXPORT_SYMBOL(iwl_led_start);
126 124
127int iwl_led_associate(struct iwl_priv *priv) 125 if (brightness > 0)
128{ 126 on = IWL_LED_SOLID;
129 IWL_DEBUG_LED(priv, "Associated\n");
130 if (priv->cfg->led_mode == IWL_LED_BLINK)
131 priv->allow_blinking = 1;
132 priv->last_blink_time = jiffies;
133 127
134 return 0; 128 iwl_led_cmd(priv, on, 0);
135} 129}
136EXPORT_SYMBOL(iwl_led_associate);
137 130
138int iwl_led_disassociate(struct iwl_priv *priv) 131static int iwl_led_blink_set(struct led_classdev *led_cdev,
132 unsigned long *delay_on,
133 unsigned long *delay_off)
139{ 134{
140 priv->allow_blinking = 0; 135 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
141 136
142 return 0; 137 return iwl_led_cmd(priv, *delay_on, *delay_off);
143} 138}
144EXPORT_SYMBOL(iwl_led_disassociate);
145 139
146/* 140void iwl_leds_init(struct iwl_priv *priv)
147 * calculate blink rate according to last second Tx/Rx activities
148 */
149static int iwl_get_blink_rate(struct iwl_priv *priv)
150{
151 int i;
152 /* count both tx and rx traffic to be able to
153 * handle traffic in either direction
154 */
155 u64 current_tpt = priv->tx_stats.data_bytes +
156 priv->rx_stats.data_bytes;
157 s64 tpt = current_tpt - priv->led_tpt;
158
159 if (tpt < 0) /* wraparound */
160 tpt = -tpt;
161
162 IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
163 (long long)tpt,
164 (unsigned long long)current_tpt);
165 priv->led_tpt = current_tpt;
166
167 if (!priv->allow_blinking)
168 i = IWL_MAX_BLINK_TBL;
169 else
170 for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
171 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
172 break;
173
174 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
175 return i;
176}
177
178/*
179 * this function called from handler. Since setting Led command can
180 * happen very frequent we postpone led command to be called from
181 * REPLY handler so we know ucode is up
182 */
183void iwl_leds_background(struct iwl_priv *priv)
184{ 141{
185 u8 blink_idx; 142 int mode = led_mode;
186 143 int ret;
187 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 144
188 priv->last_blink_time = 0; 145 if (mode == IWL_LED_DEFAULT)
189 return; 146 mode = priv->cfg->led_mode;
190 } 147
191 if (iwl_is_rfkill(priv)) { 148 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
192 priv->last_blink_time = 0; 149 wiphy_name(priv->hw->wiphy));
193 return; 150 priv->led.brightness_set = iwl_led_brightness_set;
151 priv->led.blink_set = iwl_led_blink_set;
152 priv->led.max_brightness = 1;
153
154 switch (mode) {
155 case IWL_LED_DEFAULT:
156 WARN_ON(1);
157 break;
158 case IWL_LED_BLINK:
159 priv->led.default_trigger =
160 ieee80211_create_tpt_led_trigger(priv->hw,
161 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
162 iwl_blink, ARRAY_SIZE(iwl_blink));
163 break;
164 case IWL_LED_RF_STATE:
165 priv->led.default_trigger =
166 ieee80211_get_radio_led_name(priv->hw);
167 break;
194 } 168 }
195 169
196 if (!priv->allow_blinking) { 170 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
197 priv->last_blink_time = 0; 171 if (ret) {
198 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) { 172 kfree(priv->led.name);
199 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
200 iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
201 }
202 return; 173 return;
203 } 174 }
204 if (!priv->last_blink_time ||
205 !time_after(jiffies, priv->last_blink_time +
206 msecs_to_jiffies(1000)))
207 return;
208
209 blink_idx = iwl_get_blink_rate(priv);
210 175
211 /* call only if blink rate change */ 176 priv->led_registered = true;
212 if (blink_idx != priv->last_blink_rate)
213 iwl_led_pattern(priv, blink_idx);
214
215 priv->last_blink_time = jiffies;
216 priv->last_blink_rate = blink_idx;
217} 177}
218EXPORT_SYMBOL(iwl_leds_background); 178EXPORT_SYMBOL(iwl_leds_init);
219 179
220void iwl_leds_init(struct iwl_priv *priv) 180void iwl_leds_exit(struct iwl_priv *priv)
221{ 181{
222 priv->last_blink_rate = 0; 182 if (!priv->led_registered)
223 priv->last_blink_time = 0; 183 return;
224 priv->allow_blinking = 0; 184
225 if (led_mode != IWL_LED_DEFAULT && 185 led_classdev_unregister(&priv->led);
226 led_mode != priv->cfg->led_mode) 186 kfree(priv->led.name);
227 priv->cfg->led_mode = led_mode;
228} 187}
229EXPORT_SYMBOL(iwl_leds_init); 188EXPORT_SYMBOL(iwl_leds_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 9079b33486e..101eef12b3b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -31,23 +31,14 @@
31struct iwl_priv; 31struct iwl_priv;
32 32
33#define IWL_LED_SOLID 11 33#define IWL_LED_SOLID 11
34#define IWL_LED_NAME_LEN 31
35#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) 34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
36 35
37#define IWL_LED_ACTIVITY (0<<1) 36#define IWL_LED_ACTIVITY (0<<1)
38#define IWL_LED_LINK (1<<1) 37#define IWL_LED_LINK (1<<1)
39 38
40enum led_type {
41 IWL_LED_TRG_TX,
42 IWL_LED_TRG_RX,
43 IWL_LED_TRG_ASSOC,
44 IWL_LED_TRG_RADIO,
45 IWL_LED_TRG_MAX,
46};
47
48/* 39/*
49 * LED mode 40 * LED mode
50 * IWL_LED_DEFAULT: use system default 41 * IWL_LED_DEFAULT: use device default
51 * IWL_LED_RF_STATE: turn LED on/off based on RF state 42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
52 * LED ON = RF ON 43 * LED ON = RF ON
53 * LED OFF = RF OFF 44 * LED OFF = RF OFF
@@ -60,9 +51,6 @@ enum iwl_led_mode {
60}; 51};
61 52
62void iwl_leds_init(struct iwl_priv *priv); 53void iwl_leds_init(struct iwl_priv *priv);
63void iwl_leds_background(struct iwl_priv *priv); 54void iwl_leds_exit(struct iwl_priv *priv);
64int iwl_led_start(struct iwl_priv *priv);
65int iwl_led_associate(struct iwl_priv *priv);
66int iwl_led_disassociate(struct iwl_priv *priv);
67 55
68#endif /* __iwl_leds_h__ */ 56#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
index bb1a742a98a..e1ace3ce30b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -85,10 +85,9 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
85 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", 85 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
86 channel->hw_value, changed); 86 channel->hw_value, changed);
87 87
88 if (unlikely(!priv->cfg->mod_params->disable_hw_scan && 88 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
89 test_bit(STATUS_SCANNING, &priv->status))) {
90 scan_active = 1; 89 scan_active = 1;
91 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 90 IWL_DEBUG_MAC80211(priv, "scan active\n");
92 } 91 }
93 92
94 if (changed & (IEEE80211_CONF_CHANGE_SMPS | 93 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
@@ -332,7 +331,6 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv,
332{ 331{
333 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 332 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
334 333
335 iwl_led_disassociate(priv);
336 /* 334 /*
337 * inform the ucode that there is no longer an 335 * inform the ucode that there is no longer an
338 * association and that no more packets should be 336 * association and that no more packets should be
@@ -520,8 +518,6 @@ void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
520 if (bss_conf->assoc) { 518 if (bss_conf->assoc) {
521 priv->timestamp = bss_conf->timestamp; 519 priv->timestamp = bss_conf->timestamp;
522 520
523 iwl_led_associate(priv);
524
525 if (!iwl_is_rfkill(priv)) 521 if (!iwl_is_rfkill(priv))
526 priv->cfg->ops->legacy->post_associate(priv); 522 priv->cfg->ops->legacy->post_associate(priv);
527 } else 523 } else
@@ -545,7 +541,6 @@ void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
545 memcpy(ctx->staging.bssid_addr, 541 memcpy(ctx->staging.bssid_addr,
546 bss_conf->bssid, ETH_ALEN); 542 bss_conf->bssid, ETH_ALEN);
547 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); 543 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
548 iwl_led_associate(priv);
549 priv->cfg->ops->legacy->config_ap(priv); 544 priv->cfg->ops->legacy->config_ap(priv);
550 } else 545 } else
551 iwl_set_no_assoc(priv, vif); 546 iwl_set_no_assoc(priv, vif);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 1eec18d909d..1d1bf3234d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -226,8 +226,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
226 else 226 else
227 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 227 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
228 228
229 if (priv->cfg->bt_params && 229 if (iwl_advanced_bt_coexist(priv)) {
230 priv->cfg->bt_params->advanced_bt_coexist) {
231 if (!priv->cfg->bt_params->bt_sco_disable) 230 if (!priv->cfg->bt_params->bt_sco_disable)
232 cmd->flags |= IWL_POWER_BT_SCO_ENA; 231 cmd->flags |= IWL_POWER_BT_SCO_ENA;
233 else 232 else
@@ -313,8 +312,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
313 else 312 else
314 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 313 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
315 314
316 if (priv->cfg->bt_params && 315 if (iwl_advanced_bt_coexist(priv)) {
317 priv->cfg->bt_params->advanced_bt_coexist) {
318 if (!priv->cfg->bt_params->bt_sco_disable) 316 if (!priv->cfg->bt_params->bt_sco_disable)
319 cmd->flags |= IWL_POWER_BT_SCO_ENA; 317 cmd->flags |= IWL_POWER_BT_SCO_ENA;
320 else 318 else
@@ -358,8 +356,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
358 356
359 if (priv->cfg->base_params->broken_powersave) 357 if (priv->cfg->base_params->broken_powersave)
360 iwl_power_sleep_cam_cmd(priv, cmd); 358 iwl_power_sleep_cam_cmd(priv, cmd);
361 else if (priv->cfg->base_params->supports_idle && 359 else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
362 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
363 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); 360 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
364 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection && 361 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
365 priv->cfg->ops->lib->tt_ops.tt_power_mode && 362 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 87a6fd84d4d..bc89393fb69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -234,33 +234,20 @@ EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
234void iwl_recover_from_statistics(struct iwl_priv *priv, 234void iwl_recover_from_statistics(struct iwl_priv *priv,
235 struct iwl_rx_packet *pkt) 235 struct iwl_rx_packet *pkt)
236{ 236{
237 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 237 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
238 !iwl_is_any_associated(priv))
238 return; 239 return;
239 if (iwl_is_any_associated(priv)) { 240
240 if (priv->cfg->ops->lib->check_ack_health) { 241 if (priv->cfg->ops->lib->check_ack_health &&
241 if (!priv->cfg->ops->lib->check_ack_health( 242 !priv->cfg->ops->lib->check_ack_health(priv, pkt)) {
242 priv, pkt)) { 243 IWL_ERR(priv, "low ack count detected, restart firmware\n");
243 /* 244 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
244 * low ack count detected 245 return;
245 * restart Firmware
246 */
247 IWL_ERR(priv, "low ack count detected, "
248 "restart firmware\n");
249 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
250 return;
251 }
252 }
253 if (priv->cfg->ops->lib->check_plcp_health) {
254 if (!priv->cfg->ops->lib->check_plcp_health(
255 priv, pkt)) {
256 /*
257 * high plcp error detected
258 * reset Radio
259 */
260 iwl_force_reset(priv, IWL_RF_RESET, false);
261 }
262 }
263 } 246 }
247
248 if (priv->cfg->ops->lib->check_plcp_health &&
249 !priv->cfg->ops->lib->check_plcp_health(priv, pkt))
250 iwl_force_reset(priv, IWL_RF_RESET, false);
264} 251}
265EXPORT_SYMBOL(iwl_recover_from_statistics); 252EXPORT_SYMBOL(iwl_recover_from_statistics);
266 253
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 12d9363d0af..08f1bea8b65 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -257,8 +257,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
257 queue_work(priv->workqueue, &priv->scan_completed); 257 queue_work(priv->workqueue, &priv->scan_completed);
258 258
259 if (priv->iw_mode != NL80211_IFTYPE_ADHOC && 259 if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
260 priv->cfg->bt_params && 260 iwl_advanced_bt_coexist(priv) &&
261 priv->cfg->bt_params->advanced_bt_coexist &&
262 priv->bt_status != scan_notif->bt_status) { 261 priv->bt_status != scan_notif->bt_status) {
263 if (scan_notif->bt_status) { 262 if (scan_notif->bt_status) {
264 /* BT on */ 263 /* BT on */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 371abbf60ea..adcef735180 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -2517,7 +2517,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2517 2517
2518 ieee80211_wake_queues(priv->hw); 2518 ieee80211_wake_queues(priv->hw);
2519 2519
2520 priv->active_rate = IWL_RATES_MASK; 2520 priv->active_rate = IWL_RATES_MASK_3945;
2521 2521
2522 iwl_power_update_mode(priv, true); 2522 iwl_power_update_mode(priv, true);
2523 2523
@@ -2535,15 +2535,14 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2535 /* Configure Bluetooth device coexistence support */ 2535 /* Configure Bluetooth device coexistence support */
2536 priv->cfg->ops->hcmd->send_bt_config(priv); 2536 priv->cfg->ops->hcmd->send_bt_config(priv);
2537 2537
2538 set_bit(STATUS_READY, &priv->status);
2539
2538 /* Configure the adapter for unassociated operation */ 2540 /* Configure the adapter for unassociated operation */
2539 iwl3945_commit_rxon(priv, ctx); 2541 iwl3945_commit_rxon(priv, ctx);
2540 2542
2541 iwl3945_reg_txpower_periodic(priv); 2543 iwl3945_reg_txpower_periodic(priv);
2542 2544
2543 iwl_leds_init(priv);
2544
2545 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2545 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2546 set_bit(STATUS_READY, &priv->status);
2547 wake_up_interruptible(&priv->wait_command_queue); 2546 wake_up_interruptible(&priv->wait_command_queue);
2548 2547
2549 return; 2548 return;
@@ -2861,16 +2860,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2861 u32 extra; 2860 u32 extra;
2862 u32 suspend_time = 100; 2861 u32 suspend_time = 100;
2863 u32 scan_suspend_time = 100; 2862 u32 scan_suspend_time = 100;
2864 unsigned long flags;
2865 2863
2866 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 2864 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2867 2865
2868 spin_lock_irqsave(&priv->lock, flags);
2869 if (priv->is_internal_short_scan) 2866 if (priv->is_internal_short_scan)
2870 interval = 0; 2867 interval = 0;
2871 else 2868 else
2872 interval = vif->bss_conf.beacon_int; 2869 interval = vif->bss_conf.beacon_int;
2873 spin_unlock_irqrestore(&priv->lock, flags);
2874 2870
2875 scan->suspend_time = 0; 2871 scan->suspend_time = 0;
2876 scan->max_out_time = cpu_to_le32(200 * 1024); 2872 scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -3170,8 +3166,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3170 * no need to poll the killswitch state anymore */ 3166 * no need to poll the killswitch state anymore */
3171 cancel_delayed_work(&priv->_3945.rfkill_poll); 3167 cancel_delayed_work(&priv->_3945.rfkill_poll);
3172 3168
3173 iwl_led_start(priv);
3174
3175 priv->is_open = 1; 3169 priv->is_open = 1;
3176 IWL_DEBUG_MAC80211(priv, "leave\n"); 3170 IWL_DEBUG_MAC80211(priv, "leave\n");
3177 return 0; 3171 return 0;
@@ -3289,6 +3283,14 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3289 return -EOPNOTSUPP; 3283 return -EOPNOTSUPP;
3290 } 3284 }
3291 3285
3286 /*
 3287 * To support IBSS RSN, don't program group keys in IBSS; the
 3288 * hardware will then not attempt to decrypt the frames.
3289 */
3290 if (vif->type == NL80211_IFTYPE_ADHOC &&
3291 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
3292 return -EOPNOTSUPP;
3293
3292 static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS); 3294 static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
3293 3295
3294 if (!static_key) { 3296 if (!static_key) {
@@ -3918,7 +3920,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3918 priv->contexts[IWL_RXON_CTX_BSS].interface_modes; 3920 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3919 3921
3920 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3922 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3921 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3923 WIPHY_FLAG_DISABLE_BEACON_HINTS |
3924 WIPHY_FLAG_IBSS_RSN;
3922 3925
3923 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3926 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3924 /* we create the 802.11 header and a zero-length SSID element */ 3927 /* we create the 802.11 header and a zero-length SSID element */
@@ -3935,6 +3938,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3935 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 3938 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3936 &priv->bands[IEEE80211_BAND_5GHZ]; 3939 &priv->bands[IEEE80211_BAND_5GHZ];
3937 3940
3941 iwl_leds_init(priv);
3942
3938 ret = ieee80211_register_hw(priv->hw); 3943 ret = ieee80211_register_hw(priv->hw);
3939 if (ret) { 3944 if (ret) {
3940 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 3945 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -4194,6 +4199,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4194 4199
4195 set_bit(STATUS_EXIT_PENDING, &priv->status); 4200 set_bit(STATUS_EXIT_PENDING, &priv->status);
4196 4201
4202 iwl_leds_exit(priv);
4203
4197 if (priv->mac80211_registered) { 4204 if (priv->mac80211_registered) {
4198 ieee80211_unregister_hw(priv->hw); 4205 ieee80211_unregister_hw(priv->hw);
4199 priv->mac80211_registered = 0; 4206 priv->mac80211_registered = 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 5a4982271e9..ed57e440280 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -287,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
287 return -EINVAL; 287 return -EINVAL;
288 } 288 }
289 289
290 freq = ieee80211_channel_to_frequency(umac_bss->channel); 290 freq = ieee80211_channel_to_frequency(umac_bss->channel,
291 band->band);
291 channel = ieee80211_get_channel(wiphy, freq); 292 channel = ieee80211_get_channel(wiphy, freq);
292 signal = umac_bss->rssi * 100; 293 signal = umac_bss->rssi * 100;
293 294
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a944893ae3c..9a57cf6a488 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -543,7 +543,10 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
543 switch (le32_to_cpu(complete->status)) { 543 switch (le32_to_cpu(complete->status)) {
544 case UMAC_ASSOC_COMPLETE_SUCCESS: 544 case UMAC_ASSOC_COMPLETE_SUCCESS:
545 chan = ieee80211_get_channel(wiphy, 545 chan = ieee80211_get_channel(wiphy,
546 ieee80211_channel_to_frequency(complete->channel)); 546 ieee80211_channel_to_frequency(complete->channel,
547 complete->band == UMAC_BAND_2GHZ ?
548 IEEE80211_BAND_2GHZ :
549 IEEE80211_BAND_5GHZ));
547 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) { 550 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
548 /* Associated to a unallowed channel, disassociate. */ 551 /* Associated to a unallowed channel, disassociate. */
549 __iwm_invalidate_mlme_profile(iwm); 552 __iwm_invalidate_mlme_profile(iwm);
@@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
841 goto err; 844 goto err;
842 } 845 }
843 846
844 freq = ieee80211_channel_to_frequency(umac_bss->channel); 847 freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
845 channel = ieee80211_get_channel(wiphy, freq); 848 channel = ieee80211_get_channel(wiphy, freq);
846 signal = umac_bss->rssi * 100; 849 signal = umac_bss->rssi * 100;
847 850
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 698a1f7694e..30ef0351bfc 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -607,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
607 /* No channel, no luck */ 607 /* No channel, no luck */
608 if (chan_no != -1) { 608 if (chan_no != -1) {
609 struct wiphy *wiphy = priv->wdev->wiphy; 609 struct wiphy *wiphy = priv->wdev->wiphy;
610 int freq = ieee80211_channel_to_frequency(chan_no); 610 int freq = ieee80211_channel_to_frequency(chan_no,
611 IEEE80211_BAND_2GHZ);
611 struct ieee80211_channel *channel = 612 struct ieee80211_channel *channel =
612 ieee80211_get_channel(wiphy, freq); 613 ieee80211_get_channel(wiphy, freq);
613 614
@@ -1597,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
1597 lbs_deb_enter(LBS_DEB_CFG80211); 1598 lbs_deb_enter(LBS_DEB_CFG80211);
1598 1599
1599 survey->channel = ieee80211_get_channel(wiphy, 1600 survey->channel = ieee80211_get_channel(wiphy,
1600 ieee80211_channel_to_frequency(priv->channel)); 1601 ieee80211_channel_to_frequency(priv->channel,
1602 IEEE80211_BAND_2GHZ));
1601 1603
1602 ret = lbs_get_rssi(priv, &signal, &noise); 1604 ret = lbs_get_rssi(priv, &signal, &noise);
1603 if (ret == 0) { 1605 if (ret == 0) {
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 78c4da150a7..7e8a658b767 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv)
145 if (priv->current_addr[0] == 0xff) 145 if (priv->current_addr[0] == 0xff)
146 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); 146 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
147 147
148 memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN); 148 if (!priv->copied_hwaddr) {
149 if (priv->mesh_dev) 149 memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
150 memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN); 150 if (priv->mesh_dev)
151 memcpy(priv->mesh_dev->dev_addr,
152 priv->current_addr, ETH_ALEN);
153 priv->copied_hwaddr = 1;
154 }
151 155
152out: 156out:
153 lbs_deb_leave(LBS_DEB_CMD); 157 lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 18dd9a02c45..bc461eb3966 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -90,6 +90,7 @@ struct lbs_private {
90 void *card; 90 void *card;
91 u8 fw_ready; 91 u8 fw_ready;
92 u8 surpriseremoved; 92 u8 surpriseremoved;
93 u8 setup_fw_on_resume;
93 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 94 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
94 void (*reset_card) (struct lbs_private *priv); 95 void (*reset_card) (struct lbs_private *priv);
95 int (*enter_deep_sleep) (struct lbs_private *priv); 96 int (*enter_deep_sleep) (struct lbs_private *priv);
@@ -101,6 +102,7 @@ struct lbs_private {
101 u32 fwcapinfo; 102 u32 fwcapinfo;
102 u16 regioncode; 103 u16 regioncode;
103 u8 current_addr[ETH_ALEN]; 104 u8 current_addr[ETH_ALEN];
105 u8 copied_hwaddr;
104 106
105 /* Command download */ 107 /* Command download */
106 u8 dnld_sent; 108 u8 dnld_sent;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 00600239a05..f6c2cd665f4 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -20,10 +20,8 @@
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/kthread.h>
24#include <linux/list.h> 23#include <linux/list.h>
25#include <linux/netdevice.h> 24#include <linux/netdevice.h>
26#include <linux/semaphore.h>
27#include <linux/slab.h> 25#include <linux/slab.h>
28#include <linux/spi/libertas_spi.h> 26#include <linux/spi/libertas_spi.h>
29#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
@@ -34,6 +32,12 @@
34#include "dev.h" 32#include "dev.h"
35#include "if_spi.h" 33#include "if_spi.h"
36 34
35struct if_spi_packet {
36 struct list_head list;
37 u16 blen;
38 u8 buffer[0] __attribute__((aligned(4)));
39};
40
37struct if_spi_card { 41struct if_spi_card {
38 struct spi_device *spi; 42 struct spi_device *spi;
39 struct lbs_private *priv; 43 struct lbs_private *priv;
@@ -51,18 +55,36 @@ struct if_spi_card {
51 unsigned long spu_reg_delay; 55 unsigned long spu_reg_delay;
52 56
53 /* Handles all SPI communication (except for FW load) */ 57 /* Handles all SPI communication (except for FW load) */
54 struct task_struct *spi_thread; 58 struct workqueue_struct *workqueue;
55 int run_thread; 59 struct work_struct packet_work;
56
57 /* Used to wake up the spi_thread */
58 struct semaphore spi_ready;
59 struct semaphore spi_thread_terminated;
60 60
61 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; 61 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
62
63 /* A buffer of incoming packets from libertas core.
64 * Since we can't sleep in hw_host_to_card, we have to buffer
65 * them. */
66 struct list_head cmd_packet_list;
67 struct list_head data_packet_list;
68
69 /* Protects cmd_packet_list and data_packet_list */
70 spinlock_t buffer_lock;
62}; 71};
63 72
64static void free_if_spi_card(struct if_spi_card *card) 73static void free_if_spi_card(struct if_spi_card *card)
65{ 74{
75 struct list_head *cursor, *next;
76 struct if_spi_packet *packet;
77
78 list_for_each_safe(cursor, next, &card->cmd_packet_list) {
79 packet = container_of(cursor, struct if_spi_packet, list);
80 list_del(&packet->list);
81 kfree(packet);
82 }
83 list_for_each_safe(cursor, next, &card->data_packet_list) {
84 packet = container_of(cursor, struct if_spi_packet, list);
85 list_del(&packet->list);
86 kfree(packet);
87 }
66 spi_set_drvdata(card->spi, NULL); 88 spi_set_drvdata(card->spi, NULL);
67 kfree(card); 89 kfree(card);
68} 90}
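The two packet lists are drained above with an open-coded list_for_each_safe() plus container_of(). For reference, list_for_each_entry_safe() expresses the same walk a little more compactly; a minimal sketch, where drain_packet_list() is a hypothetical helper name and not part of this patch:

static void drain_packet_list(struct list_head *head)
{
	struct if_spi_packet *packet, *tmp;

	/* safe variant: the current packet may be freed while iterating */
	list_for_each_entry_safe(packet, tmp, head, list) {
		list_del(&packet->list);
		kfree(packet);
	}
}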
@@ -622,7 +644,7 @@ out:
622/* 644/*
623 * SPI Transfer Thread 645 * SPI Transfer Thread
624 * 646 *
625 * The SPI thread handles all SPI transfers, so there is no need for a lock. 647 * The SPI worker handles all SPI transfers, so there is no need for a lock.
626 */ 648 */
627 649
628/* Move a command from the card to the host */ 650/* Move a command from the card to the host */
@@ -742,6 +764,40 @@ out:
742 return err; 764 return err;
743} 765}
744 766
767/* Move data or a command from the host to the card. */
768static void if_spi_h2c(struct if_spi_card *card,
769 struct if_spi_packet *packet, int type)
770{
771 int err = 0;
772 u16 int_type, port_reg;
773
774 switch (type) {
775 case MVMS_DAT:
776 int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
777 port_reg = IF_SPI_DATA_RDWRPORT_REG;
778 break;
779 case MVMS_CMD:
780 int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
781 port_reg = IF_SPI_CMD_RDWRPORT_REG;
782 break;
783 default:
784 lbs_pr_err("can't transfer buffer of type %d\n", type);
785 err = -EINVAL;
786 goto out;
787 }
788
789 /* Write the data to the card */
790 err = spu_write(card, port_reg, packet->buffer, packet->blen);
791 if (err)
792 goto out;
793
794out:
795 kfree(packet);
796
797 if (err)
798 lbs_pr_err("%s: error %d\n", __func__, err);
799}
800
745/* Inform the host about a card event */ 801/* Inform the host about a card event */
746static void if_spi_e2h(struct if_spi_card *card) 802static void if_spi_e2h(struct if_spi_card *card)
747{ 803{
@@ -766,71 +822,88 @@ out:
766 lbs_pr_err("%s: error %d\n", __func__, err); 822 lbs_pr_err("%s: error %d\n", __func__, err);
767} 823}
768 824
769static int lbs_spi_thread(void *data) 825static void if_spi_host_to_card_worker(struct work_struct *work)
770{ 826{
771 int err; 827 int err;
772 struct if_spi_card *card = data; 828 struct if_spi_card *card;
773 u16 hiStatus; 829 u16 hiStatus;
830 unsigned long flags;
831 struct if_spi_packet *packet;
774 832
775 while (1) { 833 card = container_of(work, struct if_spi_card, packet_work);
776 /* Wait to be woken up by one of two things. First, our ISR
777 * could tell us that something happened on the WLAN.
778 * Secondly, libertas could call hw_host_to_card with more
779 * data, which we might be able to send.
780 */
781 do {
782 err = down_interruptible(&card->spi_ready);
783 if (!card->run_thread) {
784 up(&card->spi_thread_terminated);
785 do_exit(0);
786 }
787 } while (err == -EINTR);
788 834
789 /* Read the host interrupt status register to see what we 835 lbs_deb_enter(LBS_DEB_SPI);
790 * can do. */ 836
791 err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG, 837 /* Read the host interrupt status register to see what we
792 &hiStatus); 838 * can do. */
793 if (err) { 839 err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
794 lbs_pr_err("I/O error\n"); 840 &hiStatus);
841 if (err) {
842 lbs_pr_err("I/O error\n");
843 goto err;
844 }
845
846 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
847 err = if_spi_c2h_cmd(card);
848 if (err)
795 goto err; 849 goto err;
796 } 850 }
851 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
852 err = if_spi_c2h_data(card);
853 if (err)
854 goto err;
855 }
797 856
798 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) { 857 /* workaround: in PS mode, the card does not set the Command
799 err = if_spi_c2h_cmd(card); 858 * Download Ready bit, but it sets TX Download Ready. */
800 if (err) 859 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
801 goto err; 860 (card->priv->psstate != PS_STATE_FULL_POWER &&
802 } 861 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
803 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) { 862 /* This means two things. First of all,
804 err = if_spi_c2h_data(card); 863 * if there was a previous command sent, the card has
805 if (err) 864 * successfully received it.
806 goto err; 865 * Secondly, it is now ready to download another
866 * command.
867 */
868 lbs_host_to_card_done(card->priv);
869
870 /* Do we have any command packets from the host to
871 * send? */
872 packet = NULL;
873 spin_lock_irqsave(&card->buffer_lock, flags);
874 if (!list_empty(&card->cmd_packet_list)) {
875 packet = (struct if_spi_packet *)(card->
876 cmd_packet_list.next);
877 list_del(&packet->list);
807 } 878 }
879 spin_unlock_irqrestore(&card->buffer_lock, flags);
808 880
809 /* workaround: in PS mode, the card does not set the Command 881 if (packet)
810 * Download Ready bit, but it sets TX Download Ready. */ 882 if_spi_h2c(card, packet, MVMS_CMD);
811 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || 883 }
812 (card->priv->psstate != PS_STATE_FULL_POWER && 884 if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
813 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { 885 /* Do we have any data packets from the host to
814 lbs_host_to_card_done(card->priv); 886 * send? */
887 packet = NULL;
888 spin_lock_irqsave(&card->buffer_lock, flags);
889 if (!list_empty(&card->data_packet_list)) {
890 packet = (struct if_spi_packet *)(card->
891 data_packet_list.next);
892 list_del(&packet->list);
815 } 893 }
894 spin_unlock_irqrestore(&card->buffer_lock, flags);
816 895
817 if (hiStatus & IF_SPI_HIST_CARD_EVENT) 896 if (packet)
818 if_spi_e2h(card); 897 if_spi_h2c(card, packet, MVMS_DAT);
898 }
899 if (hiStatus & IF_SPI_HIST_CARD_EVENT)
900 if_spi_e2h(card);
819 901
820err: 902err:
821 if (err) 903 if (err)
822 lbs_pr_err("%s: got error %d\n", __func__, err); 904 lbs_pr_err("%s: got error %d\n", __func__, err);
823 }
824}
825 905
826/* Block until lbs_spi_thread thread has terminated */ 906 lbs_deb_leave(LBS_DEB_SPI);
827static void if_spi_terminate_spi_thread(struct if_spi_card *card)
828{
829 /* It would be nice to use kthread_stop here, but that function
830 * can't wake threads waiting for a semaphore. */
831 card->run_thread = 0;
832 up(&card->spi_ready);
833 down(&card->spi_thread_terminated);
834} 907}
835 908
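The worker above pops the oldest buffered packet by casting cmd_packet_list.next / data_packet_list.next directly. list_first_entry() is the idiomatic spelling of the same dequeue-under-lock step; a sketch, with a hypothetical helper name:

static struct if_spi_packet *if_spi_dequeue(struct if_spi_card *card,
					    struct list_head *head)
{
	struct if_spi_packet *packet = NULL;
	unsigned long flags;

	spin_lock_irqsave(&card->buffer_lock, flags);
	if (!list_empty(head)) {
		packet = list_first_entry(head, struct if_spi_packet, list);
		list_del(&packet->list);
	}
	spin_unlock_irqrestore(&card->buffer_lock, flags);

	return packet;
}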
836/* 909/*
@@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv,
842 u8 type, u8 *buf, u16 nb) 915 u8 type, u8 *buf, u16 nb)
843{ 916{
844 int err = 0; 917 int err = 0;
918 unsigned long flags;
845 struct if_spi_card *card = priv->card; 919 struct if_spi_card *card = priv->card;
920 struct if_spi_packet *packet;
921 u16 blen;
846 922
847 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb); 923 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
848 924
849 nb = ALIGN(nb, 4); 925 if (nb == 0) {
926 lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
927 err = -EINVAL;
928 goto out;
929 }
930 blen = ALIGN(nb, 4);
931 packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
932 if (!packet) {
933 err = -ENOMEM;
934 goto out;
935 }
936 packet->blen = blen;
937 memcpy(packet->buffer, buf, nb);
938 memset(packet->buffer + nb, 0, blen - nb);
850 939
851 switch (type) { 940 switch (type) {
852 case MVMS_CMD: 941 case MVMS_CMD:
853 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb); 942 priv->dnld_sent = DNLD_CMD_SENT;
943 spin_lock_irqsave(&card->buffer_lock, flags);
944 list_add_tail(&packet->list, &card->cmd_packet_list);
945 spin_unlock_irqrestore(&card->buffer_lock, flags);
854 break; 946 break;
855 case MVMS_DAT: 947 case MVMS_DAT:
856 err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb); 948 priv->dnld_sent = DNLD_DATA_SENT;
949 spin_lock_irqsave(&card->buffer_lock, flags);
950 list_add_tail(&packet->list, &card->data_packet_list);
951 spin_unlock_irqrestore(&card->buffer_lock, flags);
857 break; 952 break;
858 default: 953 default:
859 lbs_pr_err("can't transfer buffer of type %d", type); 954 lbs_pr_err("can't transfer buffer of type %d", type);
@@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv,
861 break; 956 break;
862 } 957 }
863 958
959 /* Queue spi xfer work */
960 queue_work(card->workqueue, &card->packet_work);
961out:
864 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err); 962 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
865 return err; 963 return err;
866} 964}
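A worked example of the buffer sizing done in if_spi_host_to_card(): the payload length is rounded up to the 4-byte alignment the SPU expects, and the pad bytes are cleared (kzalloc() already zero-fills, so the explicit memset of the tail is belt and braces). Sketch only; if_spi_pack() and the 61-byte example are illustrative, not part of the patch:

/* For nb = 61 the aligned length is ALIGN(61, 4) = 64, i.e. 3 pad bytes. */
static struct if_spi_packet *if_spi_pack(const u8 *buf, u16 nb)
{
	u16 blen = ALIGN(nb, 4);
	struct if_spi_packet *packet;

	packet = kzalloc(sizeof(*packet) + blen, GFP_ATOMIC);
	if (!packet)
		return NULL;

	packet->blen = blen;
	memcpy(packet->buffer, buf, nb);
	memset(packet->buffer + nb, 0, blen - nb);	/* zero the pad bytes */
	return packet;
}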
@@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv,
869 * Host Interrupts 967 * Host Interrupts
870 * 968 *
871 * Service incoming interrupts from the WLAN device. We can't sleep here, so 969 * Service incoming interrupts from the WLAN device. We can't sleep here, so
872 * don't try to talk on the SPI bus, just wake up the SPI thread. 970 * don't try to talk on the SPI bus, just queue the SPI xfer work.
873 */ 971 */
874static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id) 972static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
875{ 973{
876 struct if_spi_card *card = dev_id; 974 struct if_spi_card *card = dev_id;
877 975
878 up(&card->spi_ready); 976 queue_work(card->workqueue, &card->packet_work);
977
879 return IRQ_HANDLED; 978 return IRQ_HANDLED;
880} 979}
881 980
@@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
883 * SPI callbacks 982 * SPI callbacks
884 */ 983 */
885 984
886static int __devinit if_spi_probe(struct spi_device *spi) 985static int if_spi_init_card(struct if_spi_card *card)
887{ 986{
888 struct if_spi_card *card; 987 struct spi_device *spi = card->spi;
889 struct lbs_private *priv = NULL; 988 int err, i;
890 struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
891 int err = 0, i;
892 u32 scratch; 989 u32 scratch;
893 struct sched_param param = { .sched_priority = 1 };
894 const struct firmware *helper = NULL; 990 const struct firmware *helper = NULL;
895 const struct firmware *mainfw = NULL; 991 const struct firmware *mainfw = NULL;
896 992
897 lbs_deb_enter(LBS_DEB_SPI); 993 lbs_deb_enter(LBS_DEB_SPI);
898 994
899 if (!pdata) { 995 err = spu_init(card, card->pdata->use_dummy_writes);
900 err = -EINVAL;
901 goto out;
902 }
903
904 if (pdata->setup) {
905 err = pdata->setup(spi);
906 if (err)
907 goto out;
908 }
909
910 /* Allocate card structure to represent this specific device */
911 card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
912 if (!card) {
913 err = -ENOMEM;
914 goto out;
915 }
916 spi_set_drvdata(spi, card);
917 card->pdata = pdata;
918 card->spi = spi;
919 card->prev_xfer_time = jiffies;
920
921 sema_init(&card->spi_ready, 0);
922 sema_init(&card->spi_thread_terminated, 0);
923
924 /* Initialize the SPI Interface Unit */
925 err = spu_init(card, pdata->use_dummy_writes);
926 if (err) 996 if (err)
927 goto free_card; 997 goto out;
928 err = spu_get_chip_revision(card, &card->card_id, &card->card_rev); 998 err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
929 if (err) 999 if (err)
930 goto free_card; 1000 goto out;
931 1001
932 /* Firmware load */
933 err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch); 1002 err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
934 if (err) 1003 if (err)
935 goto free_card; 1004 goto out;
936 if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC) 1005 if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
937 lbs_deb_spi("Firmware is already loaded for " 1006 lbs_deb_spi("Firmware is already loaded for "
938 "Marvell WLAN 802.11 adapter\n"); 1007 "Marvell WLAN 802.11 adapter\n");
@@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
946 lbs_pr_err("Unsupported chip_id: 0x%02x\n", 1015 lbs_pr_err("Unsupported chip_id: 0x%02x\n",
947 card->card_id); 1016 card->card_id);
948 err = -ENODEV; 1017 err = -ENODEV;
949 goto free_card; 1018 goto out;
950 } 1019 }
951 1020
952 err = lbs_get_firmware(&card->spi->dev, NULL, NULL, 1021 err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
@@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
954 &mainfw); 1023 &mainfw);
955 if (err) { 1024 if (err) {
956 lbs_pr_err("failed to find firmware (%d)\n", err); 1025 lbs_pr_err("failed to find firmware (%d)\n", err);
957 goto free_card; 1026 goto out;
958 } 1027 }
959 1028
960 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter " 1029 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
@@ -966,15 +1035,68 @@ static int __devinit if_spi_probe(struct spi_device *spi)
966 spi->max_speed_hz); 1035 spi->max_speed_hz);
967 err = if_spi_prog_helper_firmware(card, helper); 1036 err = if_spi_prog_helper_firmware(card, helper);
968 if (err) 1037 if (err)
969 goto free_card; 1038 goto out;
970 err = if_spi_prog_main_firmware(card, mainfw); 1039 err = if_spi_prog_main_firmware(card, mainfw);
971 if (err) 1040 if (err)
972 goto free_card; 1041 goto out;
973 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n"); 1042 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
974 } 1043 }
975 1044
976 err = spu_set_interrupt_mode(card, 0, 1); 1045 err = spu_set_interrupt_mode(card, 0, 1);
977 if (err) 1046 if (err)
1047 goto out;
1048
1049out:
1050 if (helper)
1051 release_firmware(helper);
1052 if (mainfw)
1053 release_firmware(mainfw);
1054
1055 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1056
1057 return err;
1058}
1059
1060static int __devinit if_spi_probe(struct spi_device *spi)
1061{
1062 struct if_spi_card *card;
1063 struct lbs_private *priv = NULL;
1064 struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
1065 int err = 0;
1066
1067 lbs_deb_enter(LBS_DEB_SPI);
1068
1069 if (!pdata) {
1070 err = -EINVAL;
1071 goto out;
1072 }
1073
1074 if (pdata->setup) {
1075 err = pdata->setup(spi);
1076 if (err)
1077 goto out;
1078 }
1079
1080 /* Allocate card structure to represent this specific device */
1081 card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
1082 if (!card) {
1083 err = -ENOMEM;
1084 goto teardown;
1085 }
1086 spi_set_drvdata(spi, card);
1087 card->pdata = pdata;
1088 card->spi = spi;
1089 card->prev_xfer_time = jiffies;
1090
1091 INIT_LIST_HEAD(&card->cmd_packet_list);
1092 INIT_LIST_HEAD(&card->data_packet_list);
1093 spin_lock_init(&card->buffer_lock);
1094
1095 /* Initialize the SPI Interface Unit */
1096
1097 /* Firmware load */
1098 err = if_spi_init_card(card);
1099 if (err)
978 goto free_card; 1100 goto free_card;
979 1101
980 /* Register our card with libertas. 1102 /* Register our card with libertas.
@@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
993 priv->fw_ready = 1; 1115 priv->fw_ready = 1;
994 1116
995 /* Initialize interrupt handling stuff. */ 1117 /* Initialize interrupt handling stuff. */
996 card->run_thread = 1; 1118 card->workqueue = create_workqueue("libertas_spi");
997 card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread"); 1119 INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
998 if (IS_ERR(card->spi_thread)) {
999 card->run_thread = 0;
1000 err = PTR_ERR(card->spi_thread);
1001 lbs_pr_err("error creating SPI thread: err=%d\n", err);
1002 goto remove_card;
1003 }
1004 if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
1005 lbs_pr_err("Error setting scheduler, using default.\n");
1006 1120
1007 err = request_irq(spi->irq, if_spi_host_interrupt, 1121 err = request_irq(spi->irq, if_spi_host_interrupt,
1008 IRQF_TRIGGER_FALLING, "libertas_spi", card); 1122 IRQF_TRIGGER_FALLING, "libertas_spi", card);
1009 if (err) { 1123 if (err) {
1010 lbs_pr_err("can't get host irq line-- request_irq failed\n"); 1124 lbs_pr_err("can't get host irq line-- request_irq failed\n");
1011 goto terminate_thread; 1125 goto terminate_workqueue;
1012 } 1126 }
1013 1127
1014 /* poke the IRQ handler so that we don't miss the first interrupt */
1015 up(&card->spi_ready);
1016
1017 /* Start the card. 1128 /* Start the card.
1018 * This will call register_netdev, and we'll start 1129 * This will call register_netdev, and we'll start
1019 * getting interrupts... */ 1130 * getting interrupts... */
@@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1028 1139
1029release_irq: 1140release_irq:
1030 free_irq(spi->irq, card); 1141 free_irq(spi->irq, card);
1031terminate_thread: 1142terminate_workqueue:
1032 if_spi_terminate_spi_thread(card); 1143 flush_workqueue(card->workqueue);
1033remove_card: 1144 destroy_workqueue(card->workqueue);
1034 lbs_remove_card(priv); /* will call free_netdev */ 1145 lbs_remove_card(priv); /* will call free_netdev */
1035free_card: 1146free_card:
1036 free_if_spi_card(card); 1147 free_if_spi_card(card);
1148teardown:
1149 if (pdata->teardown)
1150 pdata->teardown(spi);
1037out: 1151out:
1038 if (helper)
1039 release_firmware(helper);
1040 if (mainfw)
1041 release_firmware(mainfw);
1042
1043 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); 1152 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1044 return err; 1153 return err;
1045} 1154}
@@ -1056,7 +1165,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
1056 lbs_remove_card(priv); /* will call free_netdev */ 1165 lbs_remove_card(priv); /* will call free_netdev */
1057 1166
1058 free_irq(spi->irq, card); 1167 free_irq(spi->irq, card);
1059 if_spi_terminate_spi_thread(card); 1168 flush_workqueue(card->workqueue);
1169 destroy_workqueue(card->workqueue);
1060 if (card->pdata->teardown) 1170 if (card->pdata->teardown)
1061 card->pdata->teardown(spi); 1171 card->pdata->teardown(spi);
1062 free_if_spi_card(card); 1172 free_if_spi_card(card);
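The teardown order in libertas_spi_remove() matters: the card is unregistered and the IRQ freed before the workqueue is flushed, so no new packet_work can be queued while it drains, and the board-specific teardown hook plus the final kfree() come last. A condensed sketch, where if_spi_stop() is a hypothetical wrapper:

static void if_spi_stop(struct spi_device *spi)
{
	struct if_spi_card *card = spi_get_drvdata(spi);

	lbs_remove_card(card->priv);		/* will call free_netdev */
	free_irq(spi->irq, card);		/* no more queue_work() from the ISR */
	flush_workqueue(card->workqueue);	/* let queued transfers finish */
	destroy_workqueue(card->workqueue);
	if (card->pdata->teardown)
		card->pdata->teardown(spi);	/* board-specific cleanup */
	free_if_spi_card(card);			/* also frees buffered packets */
}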
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 6836a6dd985..ca8149cd5bd 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -539,6 +539,43 @@ static int lbs_thread(void *data)
539 return 0; 539 return 0;
540} 540}
541 541
542/**
543 * @brief This function gets the HW spec from the firmware and sets
544 * some basic parameters.
545 *
546 * @param priv A pointer to struct lbs_private structure
547 * @return 0 or -1
548 */
549static int lbs_setup_firmware(struct lbs_private *priv)
550{
551 int ret = -1;
552 s16 curlevel = 0, minlevel = 0, maxlevel = 0;
553
554 lbs_deb_enter(LBS_DEB_FW);
555
556 /* Read MAC address from firmware */
557 memset(priv->current_addr, 0xff, ETH_ALEN);
558 ret = lbs_update_hw_spec(priv);
559 if (ret)
560 goto done;
561
562 /* Read power levels if available */
563 ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
564 if (ret == 0) {
565 priv->txpower_cur = curlevel;
566 priv->txpower_min = minlevel;
567 priv->txpower_max = maxlevel;
568 }
569
570 /* Send cmd to FW to enable 11D function */
571 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
572
573 lbs_set_mac_control(priv);
574done:
575 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
576 return ret;
577}
578
542int lbs_suspend(struct lbs_private *priv) 579int lbs_suspend(struct lbs_private *priv)
543{ 580{
544 int ret; 581 int ret;
@@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv)
584 lbs_pr_err("deep sleep activation failed: %d\n", ret); 621 lbs_pr_err("deep sleep activation failed: %d\n", ret);
585 } 622 }
586 623
587 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 624 if (priv->setup_fw_on_resume)
588 return ret; 625 ret = lbs_setup_firmware(priv);
589}
590EXPORT_SYMBOL_GPL(lbs_resume);
591
592/**
593 * @brief This function gets the HW spec from the firmware and sets
594 * some basic parameters.
595 *
596 * @param priv A pointer to struct lbs_private structure
597 * @return 0 or -1
598 */
599static int lbs_setup_firmware(struct lbs_private *priv)
600{
601 int ret = -1;
602 s16 curlevel = 0, minlevel = 0, maxlevel = 0;
603
604 lbs_deb_enter(LBS_DEB_FW);
605
606 /* Read MAC address from firmware */
607 memset(priv->current_addr, 0xff, ETH_ALEN);
608 ret = lbs_update_hw_spec(priv);
609 if (ret)
610 goto done;
611
612 /* Read power levels if available */
613 ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
614 if (ret == 0) {
615 priv->txpower_cur = curlevel;
616 priv->txpower_min = minlevel;
617 priv->txpower_max = maxlevel;
618 }
619 626
620 /* Send cmd to FW to enable 11D function */
621 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
622
623 lbs_set_mac_control(priv);
624done:
625 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 627 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
626 return ret; 628 return ret;
627} 629}
630EXPORT_SYMBOL_GPL(lbs_resume);
628 631
629/** 632/**
630 * This function handles the timeout of command sending. 633 * This function handles the timeout of command sending.
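With lbs_setup_firmware() now defined ahead of lbs_resume(), an interface driver that has to re-download firmware across suspend can ask the core to redo the basic setup (MAC address, TX power levels, 11D) on resume by setting the new setup_fw_on_resume flag. A hedged sketch; the resume callback name and the drvdata layout are illustrative only:

static int my_if_resume(struct device *dev)
{
	struct lbs_private *priv = dev_get_drvdata(dev);

	/* firmware was reloaded, so HW spec and TX power must be re-read */
	priv->setup_fw_on_resume = 1;

	return lbs_resume(priv);
}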
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 454f045ddff..5d39b284058 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -943,7 +943,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
943static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, 943static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
944 struct ieee80211_vif *vif, 944 struct ieee80211_vif *vif,
945 enum ieee80211_ampdu_mlme_action action, 945 enum ieee80211_ampdu_mlme_action action,
946 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 946 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
947 u8 buf_size)
947{ 948{
948 switch (action) { 949 switch (action) {
949 case IEEE80211_AMPDU_TX_START: 950 case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9ecf8407cb1..af4f2c64f24 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -232,6 +232,9 @@ struct mwl8k_priv {
232 struct completion firmware_loading_complete; 232 struct completion firmware_loading_complete;
233}; 233};
234 234
235#define MAX_WEP_KEY_LEN 13
236#define NUM_WEP_KEYS 4
237
235/* Per interface specific private data */ 238/* Per interface specific private data */
236struct mwl8k_vif { 239struct mwl8k_vif {
237 struct list_head list; 240 struct list_head list;
@@ -242,8 +245,21 @@ struct mwl8k_vif {
242 245
243 /* Non AMPDU sequence number assigned by driver. */ 246 /* Non AMPDU sequence number assigned by driver. */
244 u16 seqno; 247 u16 seqno;
248
249 /* Saved WEP keys */
250 struct {
251 u8 enabled;
252 u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
253 } wep_key_conf[NUM_WEP_KEYS];
254
255 /* BSSID */
256 u8 bssid[ETH_ALEN];
257
 258 /* A flag to indicate if HW crypto is enabled for this bssid */
259 bool is_hw_crypto_enabled;
245}; 260};
246#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) 261#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
262#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
247 263
248struct mwl8k_sta { 264struct mwl8k_sta {
249 /* Index into station database. Returned by UPDATE_STADB. */ 265 /* Index into station database. Returned by UPDATE_STADB. */
@@ -337,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
337#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 353#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
338#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */ 354#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
339#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ 355#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
356#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
340#define MWL8K_CMD_UPDATE_STADB 0x1123 357#define MWL8K_CMD_UPDATE_STADB 0x1123
341 358
342static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) 359static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
@@ -375,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
375 MWL8K_CMDNAME(SET_RATEADAPT_MODE); 392 MWL8K_CMDNAME(SET_RATEADAPT_MODE);
376 MWL8K_CMDNAME(BSS_START); 393 MWL8K_CMDNAME(BSS_START);
377 MWL8K_CMDNAME(SET_NEW_STN); 394 MWL8K_CMDNAME(SET_NEW_STN);
395 MWL8K_CMDNAME(UPDATE_ENCRYPTION);
378 MWL8K_CMDNAME(UPDATE_STADB); 396 MWL8K_CMDNAME(UPDATE_STADB);
379 default: 397 default:
380 snprintf(buf, bufsize, "0x%x", cmd); 398 snprintf(buf, bufsize, "0x%x", cmd);
@@ -715,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
715 skb_pull(skb, sizeof(*tr) - hdrlen); 733 skb_pull(skb, sizeof(*tr) - hdrlen);
716} 734}
717 735
718static inline void mwl8k_add_dma_header(struct sk_buff *skb) 736static void
737mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
719{ 738{
720 struct ieee80211_hdr *wh; 739 struct ieee80211_hdr *wh;
721 int hdrlen; 740 int hdrlen;
741 int reqd_hdrlen;
722 struct mwl8k_dma_data *tr; 742 struct mwl8k_dma_data *tr;
723 743
724 /* 744 /*
@@ -730,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
730 wh = (struct ieee80211_hdr *)skb->data; 750 wh = (struct ieee80211_hdr *)skb->data;
731 751
732 hdrlen = ieee80211_hdrlen(wh->frame_control); 752 hdrlen = ieee80211_hdrlen(wh->frame_control);
733 if (hdrlen != sizeof(*tr)) 753 reqd_hdrlen = sizeof(*tr);
734 skb_push(skb, sizeof(*tr) - hdrlen); 754
755 if (hdrlen != reqd_hdrlen)
756 skb_push(skb, reqd_hdrlen - hdrlen);
735 757
736 if (ieee80211_is_data_qos(wh->frame_control)) 758 if (ieee80211_is_data_qos(wh->frame_control))
737 hdrlen -= 2; 759 hdrlen -= IEEE80211_QOS_CTL_LEN;
738 760
739 tr = (struct mwl8k_dma_data *)skb->data; 761 tr = (struct mwl8k_dma_data *)skb->data;
740 if (wh != &tr->wh) 762 if (wh != &tr->wh)
@@ -747,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
747 * payload". That is, everything except for the 802.11 header. 769 * payload". That is, everything except for the 802.11 header.
748 * This includes all crypto material including the MIC. 770 * This includes all crypto material including the MIC.
749 */ 771 */
750 tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr)); 772 tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
751} 773}
752 774
775static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
776{
777 struct ieee80211_hdr *wh;
778 struct ieee80211_tx_info *tx_info;
779 struct ieee80211_key_conf *key_conf;
780 int data_pad;
781
782 wh = (struct ieee80211_hdr *)skb->data;
783
784 tx_info = IEEE80211_SKB_CB(skb);
785
786 key_conf = NULL;
787 if (ieee80211_is_data(wh->frame_control))
788 key_conf = tx_info->control.hw_key;
789
790 /*
791 * Make sure the packet header is in the DMA header format (4-address
 792 * without QoS). The necessary crypto padding between the header
 793 * and the payload has already been provided by mac80211, but mac80211
 794 * does not add tail padding when HW crypto is enabled.
795 *
796 * We have the following trailer padding requirements:
797 * - WEP: 4 trailer bytes (ICV)
798 * - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
799 * - CCMP: 8 trailer bytes (MIC)
800 */
801 data_pad = 0;
802 if (key_conf != NULL) {
803 switch (key_conf->cipher) {
804 case WLAN_CIPHER_SUITE_WEP40:
805 case WLAN_CIPHER_SUITE_WEP104:
806 data_pad = 4;
807 break;
808 case WLAN_CIPHER_SUITE_TKIP:
809 data_pad = 12;
810 break;
811 case WLAN_CIPHER_SUITE_CCMP:
812 data_pad = 8;
813 break;
814 }
815 }
816 mwl8k_add_dma_header(skb, data_pad);
817}
753 818
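The cipher-to-trailer mapping used by mwl8k_encapsulate_tx_frame() can be read as a small lookup; mwl8k_tail_pad() below is a hypothetical helper that simply restates the switch above, for reference:

static int mwl8k_tail_pad(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return 4;	/* ICV */
	case WLAN_CIPHER_SUITE_TKIP:
		return 12;	/* 8-byte MIC + 4-byte ICV */
	case WLAN_CIPHER_SUITE_CCMP:
		return 8;	/* MIC */
	default:
		return 0;	/* open or software crypto: no trailer */
	}
}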
754/* 819/*
755 * Packet reception for 88w8366 AP firmware. 820 * Packet reception for 88w8366 AP firmware.
@@ -778,6 +843,13 @@ struct mwl8k_rxd_8366_ap {
778 843
779#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 844#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
780 845
846/* 8366 AP rx_status bits */
847#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
848#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
849#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
850#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
851#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
852
781static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) 853static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
782{ 854{
783 struct mwl8k_rxd_8366_ap *rxd = _rxd; 855 struct mwl8k_rxd_8366_ap *rxd = _rxd;
@@ -834,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
834 } else { 906 } else {
835 status->band = IEEE80211_BAND_2GHZ; 907 status->band = IEEE80211_BAND_2GHZ;
836 } 908 }
837 status->freq = ieee80211_channel_to_frequency(rxd->channel); 909 status->freq = ieee80211_channel_to_frequency(rxd->channel,
910 status->band);
838 911
839 *qos = rxd->qos_control; 912 *qos = rxd->qos_control;
840 913
914 if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
915 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
916 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
917 status->flag |= RX_FLAG_MMIC_ERROR;
918
841 return le16_to_cpu(rxd->pkt_len); 919 return le16_to_cpu(rxd->pkt_len);
842} 920}
843 921
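The rx_status test added to mwl8k_rxd_8366_ap_process() only reports a Michael MIC failure when the status byte is not the catch-all 0xFF value and both the decrypt-error and TKIP-MIC bits are set. The same check written out as a hypothetical predicate:

static bool mwl8k_8366_ap_mmic_failed(u8 rx_status)
{
	/* 0xFF is a generic decrypt error that carries no cipher detail */
	if (rx_status == MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR)
		return false;

	return (rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
	       (rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR);
}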
@@ -876,6 +954,11 @@ struct mwl8k_rxd_sta {
876#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001 954#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
877 955
878#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02 956#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
957#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04
958/* ICV=0 or MIC=1 */
959#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08
960/* Key is uploaded only in failure case */
961#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30
879 962
880static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr) 963static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
881{ 964{
@@ -931,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
931 } else { 1014 } else {
932 status->band = IEEE80211_BAND_2GHZ; 1015 status->band = IEEE80211_BAND_2GHZ;
933 } 1016 }
934 status->freq = ieee80211_channel_to_frequency(rxd->channel); 1017 status->freq = ieee80211_channel_to_frequency(rxd->channel,
1018 status->band);
935 1019
936 *qos = rxd->qos_control; 1020 *qos = rxd->qos_control;
1021 if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
1022 (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
1023 status->flag |= RX_FLAG_MMIC_ERROR;
937 1024
938 return le16_to_cpu(rxd->pkt_len); 1025 return le16_to_cpu(rxd->pkt_len);
939} 1026}
@@ -1092,9 +1179,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
1092 ieee80211_queue_work(hw, &priv->finalize_join_worker); 1179 ieee80211_queue_work(hw, &priv->finalize_join_worker);
1093} 1180}
1094 1181
1182static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
1183 u8 *bssid)
1184{
1185 struct mwl8k_vif *mwl8k_vif;
1186
1187 list_for_each_entry(mwl8k_vif,
1188 vif_list, list) {
1189 if (memcmp(bssid, mwl8k_vif->bssid,
1190 ETH_ALEN) == 0)
1191 return mwl8k_vif;
1192 }
1193
1194 return NULL;
1195}
1196
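mwl8k_find_vif_bss() matches the receiver address of the frame against each vif's saved BSSID with memcmp(); compare_ether_addr() from <linux/etherdevice.h> is an equivalent way to spell the 6-byte compare. Sketch only, with a hypothetical function name:

static struct mwl8k_vif *find_vif_by_bssid(struct list_head *vif_list,
					   const u8 *bssid)
{
	struct mwl8k_vif *v;

	list_for_each_entry(v, vif_list, list)
		if (!compare_ether_addr(bssid, v->bssid))	/* 0 == match */
			return v;

	return NULL;
}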
1095static int rxq_process(struct ieee80211_hw *hw, int index, int limit) 1197static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1096{ 1198{
1097 struct mwl8k_priv *priv = hw->priv; 1199 struct mwl8k_priv *priv = hw->priv;
1200 struct mwl8k_vif *mwl8k_vif = NULL;
1098 struct mwl8k_rx_queue *rxq = priv->rxq + index; 1201 struct mwl8k_rx_queue *rxq = priv->rxq + index;
1099 int processed; 1202 int processed;
1100 1203
@@ -1104,6 +1207,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1104 void *rxd; 1207 void *rxd;
1105 int pkt_len; 1208 int pkt_len;
1106 struct ieee80211_rx_status status; 1209 struct ieee80211_rx_status status;
1210 struct ieee80211_hdr *wh;
1107 __le16 qos; 1211 __le16 qos;
1108 1212
1109 skb = rxq->buf[rxq->head].skb; 1213 skb = rxq->buf[rxq->head].skb;
@@ -1130,8 +1234,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1130 1234
1131 rxq->rxd_count--; 1235 rxq->rxd_count--;
1132 1236
1133 skb_put(skb, pkt_len); 1237 wh = &((struct mwl8k_dma_data *)skb->data)->wh;
1134 mwl8k_remove_dma_header(skb, qos);
1135 1238
1136 /* 1239 /*
1137 * Check for a pending join operation. Save a 1240 * Check for a pending join operation. Save a
@@ -1141,6 +1244,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1141 if (mwl8k_capture_bssid(priv, (void *)skb->data)) 1244 if (mwl8k_capture_bssid(priv, (void *)skb->data))
1142 mwl8k_save_beacon(hw, skb); 1245 mwl8k_save_beacon(hw, skb);
1143 1246
1247 if (ieee80211_has_protected(wh->frame_control)) {
1248
1249 /* Check if hw crypto has been enabled for
1250 * this bss. If yes, set the status flags
1251 * accordingly
1252 */
1253 mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
1254 wh->addr1);
1255
1256 if (mwl8k_vif != NULL &&
1257 mwl8k_vif->is_hw_crypto_enabled == true) {
1258 /*
 1259 * When the firmware encounters an
 1260 * MMIC error, the payload is
 1261 * dropped and only the 32-byte
 1262 * mwl8k firmware header is sent
 1263 * to the host.
 1264 *
 1265 * We need to add four bytes of
 1266 * key information; mac80211
 1267 * expects keyidx 0 in it to
 1268 * trigger the TKIP countermeasures
 1269 * for the MMIC failure.
1270 */
1271 if (status.flag & RX_FLAG_MMIC_ERROR) {
1272 struct mwl8k_dma_data *tr;
1273 tr = (struct mwl8k_dma_data *)skb->data;
1274 memset((void *)&(tr->data), 0, 4);
1275 pkt_len += 4;
1276 }
1277
1278 if (!ieee80211_is_auth(wh->frame_control))
1279 status.flag |= RX_FLAG_IV_STRIPPED |
1280 RX_FLAG_DECRYPTED |
1281 RX_FLAG_MMIC_STRIPPED;
1282 }
1283 }
1284
1285 skb_put(skb, pkt_len);
1286 mwl8k_remove_dma_header(skb, qos);
1144 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 1287 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
1145 ieee80211_rx_irqsafe(hw, skb); 1288 ieee80211_rx_irqsafe(hw, skb);
1146 1289
@@ -1443,7 +1586,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1443 else 1586 else
1444 qos = 0; 1587 qos = 0;
1445 1588
1446 mwl8k_add_dma_header(skb); 1589 if (priv->ap_fw)
1590 mwl8k_encapsulate_tx_frame(skb);
1591 else
1592 mwl8k_add_dma_header(skb, 0);
1593
1447 wh = &((struct mwl8k_dma_data *)skb->data)->wh; 1594 wh = &((struct mwl8k_dma_data *)skb->data)->wh;
1448 1595
1449 tx_info = IEEE80211_SKB_CB(skb); 1596 tx_info = IEEE80211_SKB_CB(skb);
@@ -3099,6 +3246,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
3099} 3246}
3100 3247
3101/* 3248/*
3249 * CMD_UPDATE_ENCRYPTION.
3250 */
3251
3252#define MAX_ENCR_KEY_LENGTH 16
3253#define MIC_KEY_LENGTH 8
3254
3255struct mwl8k_cmd_update_encryption {
3256 struct mwl8k_cmd_pkt header;
3257
3258 __le32 action;
3259 __le32 reserved;
3260 __u8 mac_addr[6];
3261 __u8 encr_type;
3262
3263} __attribute__((packed));
3264
3265struct mwl8k_cmd_set_key {
3266 struct mwl8k_cmd_pkt header;
3267
3268 __le32 action;
3269 __le32 reserved;
3270 __le16 length;
3271 __le16 key_type_id;
3272 __le32 key_info;
3273 __le32 key_id;
3274 __le16 key_len;
3275 __u8 key_material[MAX_ENCR_KEY_LENGTH];
3276 __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
3277 __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
3278 __le16 tkip_rsc_low;
3279 __le32 tkip_rsc_high;
3280 __le16 tkip_tsc_low;
3281 __le32 tkip_tsc_high;
3282 __u8 mac_addr[6];
3283} __attribute__((packed));
3284
3285enum {
3286 MWL8K_ENCR_ENABLE,
3287 MWL8K_ENCR_SET_KEY,
3288 MWL8K_ENCR_REMOVE_KEY,
3289 MWL8K_ENCR_SET_GROUP_KEY,
3290};
3291
3292#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0
3293#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1
3294#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4
3295#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7
3296#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8
3297
3298enum {
3299 MWL8K_ALG_WEP,
3300 MWL8K_ALG_TKIP,
3301 MWL8K_ALG_CCMP,
3302};
3303
3304#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004
3305#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008
3306#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040
3307#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000
3308#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000
3309
3310static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
3311 struct ieee80211_vif *vif,
3312 u8 *addr,
3313 u8 encr_type)
3314{
3315 struct mwl8k_cmd_update_encryption *cmd;
3316 int rc;
3317
3318 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3319 if (cmd == NULL)
3320 return -ENOMEM;
3321
3322 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
3323 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3324 cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
3325 memcpy(cmd->mac_addr, addr, ETH_ALEN);
3326 cmd->encr_type = encr_type;
3327
3328 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3329 kfree(cmd);
3330
3331 return rc;
3332}
3333
3334static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
3335 u8 *addr,
3336 struct ieee80211_key_conf *key)
3337{
3338 cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
3339 cmd->header.length = cpu_to_le16(sizeof(*cmd));
3340 cmd->length = cpu_to_le16(sizeof(*cmd) -
3341 offsetof(struct mwl8k_cmd_set_key, length));
3342 cmd->key_id = cpu_to_le32(key->keyidx);
3343 cmd->key_len = cpu_to_le16(key->keylen);
3344 memcpy(cmd->mac_addr, addr, ETH_ALEN);
3345
3346 switch (key->cipher) {
3347 case WLAN_CIPHER_SUITE_WEP40:
3348 case WLAN_CIPHER_SUITE_WEP104:
3349 cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
3350 if (key->keyidx == 0)
3351 cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
3352
3353 break;
3354 case WLAN_CIPHER_SUITE_TKIP:
3355 cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
3356 cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
3357 ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
3358 : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
3359 cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
3360 | MWL8K_KEY_FLAG_TSC_VALID);
3361 break;
3362 case WLAN_CIPHER_SUITE_CCMP:
3363 cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
3364 cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
3365 ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
3366 : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
3367 break;
3368 default:
3369 return -ENOTSUPP;
3370 }
3371
3372 return 0;
3373}
3374
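For reference, these are the key_info values mwl8k_encryption_set_cmd_info() ends up producing for the two common cases; this is just the flag arithmetic from the switch above, not new driver behaviour:

/* pairwise TKIP key */
__le32 tkip_pairwise = cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE |
				   MWL8K_KEY_FLAG_MICKEY_VALID |
				   MWL8K_KEY_FLAG_TSC_VALID);

/* WEP default TX key (key index 0) */
__le32 wep_txkey = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);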
3375static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
3376 struct ieee80211_vif *vif,
3377 u8 *addr,
3378 struct ieee80211_key_conf *key)
3379{
3380 struct mwl8k_cmd_set_key *cmd;
3381 int rc;
3382 int keymlen;
3383 u32 action;
3384 u8 idx;
3385 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
3386
3387 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3388 if (cmd == NULL)
3389 return -ENOMEM;
3390
3391 rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
3392 if (rc < 0)
3393 goto done;
3394
3395 idx = key->keyidx;
3396
3397 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
3398 action = MWL8K_ENCR_SET_KEY;
3399 else
3400 action = MWL8K_ENCR_SET_GROUP_KEY;
3401
3402 switch (key->cipher) {
3403 case WLAN_CIPHER_SUITE_WEP40:
3404 case WLAN_CIPHER_SUITE_WEP104:
3405 if (!mwl8k_vif->wep_key_conf[idx].enabled) {
3406 memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
3407 sizeof(*key) + key->keylen);
3408 mwl8k_vif->wep_key_conf[idx].enabled = 1;
3409 }
3410
3411 keymlen = 0;
3412 action = MWL8K_ENCR_SET_KEY;
3413 break;
3414 case WLAN_CIPHER_SUITE_TKIP:
3415 keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
3416 break;
3417 case WLAN_CIPHER_SUITE_CCMP:
3418 keymlen = key->keylen;
3419 break;
3420 default:
3421 rc = -ENOTSUPP;
3422 goto done;
3423 }
3424
3425 memcpy(cmd->key_material, key->key, keymlen);
3426 cmd->action = cpu_to_le32(action);
3427
3428 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3429done:
3430 kfree(cmd);
3431
3432 return rc;
3433}
3434
3435static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
3436 struct ieee80211_vif *vif,
3437 u8 *addr,
3438 struct ieee80211_key_conf *key)
3439{
3440 struct mwl8k_cmd_set_key *cmd;
3441 int rc;
3442 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
3443
3444 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
3445 if (cmd == NULL)
3446 return -ENOMEM;
3447
3448 rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
3449 if (rc < 0)
3450 goto done;
3451
 3452 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
 3453 key->cipher == WLAN_CIPHER_SUITE_WEP104)
3454 mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
3455
3456 cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
3457
3458 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3459done:
3460 kfree(cmd);
3461
3462 return rc;
3463}
3464
3465static int mwl8k_set_key(struct ieee80211_hw *hw,
3466 enum set_key_cmd cmd_param,
3467 struct ieee80211_vif *vif,
3468 struct ieee80211_sta *sta,
3469 struct ieee80211_key_conf *key)
3470{
3471 int rc = 0;
3472 u8 encr_type;
3473 u8 *addr;
3474 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
3475
3476 if (vif->type == NL80211_IFTYPE_STATION)
3477 return -EOPNOTSUPP;
3478
3479 if (sta == NULL)
3480 addr = hw->wiphy->perm_addr;
3481 else
3482 addr = sta->addr;
3483
3484 if (cmd_param == SET_KEY) {
3485 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3486 rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
3487 if (rc)
3488 goto out;
3489
3490 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
3491 || (key->cipher == WLAN_CIPHER_SUITE_WEP104))
3492 encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
3493 else
3494 encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
3495
3496 rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
3497 encr_type);
3498 if (rc)
3499 goto out;
3500
3501 mwl8k_vif->is_hw_crypto_enabled = true;
3502
3503 } else {
3504 rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
3505
3506 if (rc)
3507 goto out;
3508
3509 mwl8k_vif->is_hw_crypto_enabled = false;
3510
3511 }
3512out:
3513 return rc;
3514}
3515
3516/*
3102 * CMD_UPDATE_STADB. 3517 * CMD_UPDATE_STADB.
3103 */ 3518 */
3104struct ewc_ht_info { 3519struct ewc_ht_info {
@@ -3469,6 +3884,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
3469 mwl8k_vif->vif = vif; 3884 mwl8k_vif->vif = vif;
3470 mwl8k_vif->macid = macid; 3885 mwl8k_vif->macid = macid;
3471 mwl8k_vif->seqno = 0; 3886 mwl8k_vif->seqno = 0;
3887 memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
3888 mwl8k_vif->is_hw_crypto_enabled = false;
3472 3889
3473 /* Set the mac address. */ 3890 /* Set the mac address. */
3474 mwl8k_cmd_set_mac_addr(hw, vif, vif->addr); 3891 mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
@@ -3866,18 +4283,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
3866{ 4283{
3867 struct mwl8k_priv *priv = hw->priv; 4284 struct mwl8k_priv *priv = hw->priv;
3868 int ret; 4285 int ret;
4286 int i;
4287 struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
4288 struct ieee80211_key_conf *key;
3869 4289
3870 if (!priv->ap_fw) { 4290 if (!priv->ap_fw) {
3871 ret = mwl8k_cmd_update_stadb_add(hw, vif, sta); 4291 ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
3872 if (ret >= 0) { 4292 if (ret >= 0) {
3873 MWL8K_STA(sta)->peer_id = ret; 4293 MWL8K_STA(sta)->peer_id = ret;
3874 return 0; 4294 ret = 0;
3875 } 4295 }
3876 4296
3877 return ret; 4297 } else {
4298 ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
3878 } 4299 }
3879 4300
3880 return mwl8k_cmd_set_new_stn_add(hw, vif, sta); 4301 for (i = 0; i < NUM_WEP_KEYS; i++) {
4302 key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
4303 if (mwl8k_vif->wep_key_conf[i].enabled)
4304 mwl8k_set_key(hw, SET_KEY, vif, sta, key);
4305 }
4306 return ret;
3881} 4307}
3882 4308
3883static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, 4309static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3932,7 +4358,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
3932static int 4358static int
3933mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4359mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3934 enum ieee80211_ampdu_mlme_action action, 4360 enum ieee80211_ampdu_mlme_action action,
3935 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 4361 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4362 u8 buf_size)
3936{ 4363{
3937 switch (action) { 4364 switch (action) {
3938 case IEEE80211_AMPDU_RX_START: 4365 case IEEE80211_AMPDU_RX_START:
@@ -3955,6 +4382,7 @@ static const struct ieee80211_ops mwl8k_ops = {
3955 .bss_info_changed = mwl8k_bss_info_changed, 4382 .bss_info_changed = mwl8k_bss_info_changed,
3956 .prepare_multicast = mwl8k_prepare_multicast, 4383 .prepare_multicast = mwl8k_prepare_multicast,
3957 .configure_filter = mwl8k_configure_filter, 4384 .configure_filter = mwl8k_configure_filter,
4385 .set_key = mwl8k_set_key,
3958 .set_rts_threshold = mwl8k_set_rts_threshold, 4386 .set_rts_threshold = mwl8k_set_rts_threshold,
3959 .sta_add = mwl8k_sta_add, 4387 .sta_add = mwl8k_sta_add,
3960 .sta_remove = mwl8k_sta_remove, 4388 .sta_remove = mwl8k_sta_remove,
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 35b09aa0529..f54e15fcd62 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -55,6 +55,17 @@ static struct ieee80211_rate p54_arates[] = {
55 { .bitrate = 540, .hw_value = 11, }, 55 { .bitrate = 540, .hw_value = 11, },
56}; 56};
57 57
58static struct p54_rssi_db_entry p54_rssi_default = {
59 /*
60 * The defaults are taken from usb-logs of the
61 * vendor driver. So, they should be safe to
62 * use in case we can't get a match from the
63 * rssi <-> dBm conversion database.
64 */
65 .mul = 130,
66 .add = -398,
67};
68
58#define CHAN_HAS_CAL BIT(0) 69#define CHAN_HAS_CAL BIT(0)
59#define CHAN_HAS_LIMIT BIT(1) 70#define CHAN_HAS_LIMIT BIT(1)
60#define CHAN_HAS_CURVE BIT(2) 71#define CHAN_HAS_CURVE BIT(2)
@@ -87,13 +98,27 @@ static int p54_get_band_from_freq(u16 freq)
87 return -1; 98 return -1;
88} 99}
89 100
101static int same_band(u16 freq, u16 freq2)
102{
103 return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2);
104}
105
90static int p54_compare_channels(const void *_a, 106static int p54_compare_channels(const void *_a,
91 const void *_b) 107 const void *_b)
92{ 108{
93 const struct p54_channel_entry *a = _a; 109 const struct p54_channel_entry *a = _a;
94 const struct p54_channel_entry *b = _b; 110 const struct p54_channel_entry *b = _b;
95 111
96 return a->index - b->index; 112 return a->freq - b->freq;
113}
114
115static int p54_compare_rssichan(const void *_a,
116 const void *_b)
117{
118 const struct p54_rssi_db_entry *a = _a;
119 const struct p54_rssi_db_entry *b = _b;
120
121 return a->freq - b->freq;
97} 122}
98 123
99static int p54_fill_band_bitrates(struct ieee80211_hw *dev, 124static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
@@ -145,25 +170,26 @@ static int p54_generate_band(struct ieee80211_hw *dev,
145 170
146 for (i = 0, j = 0; (j < list->band_channel_num[band]) && 171 for (i = 0, j = 0; (j < list->band_channel_num[band]) &&
147 (i < list->entries); i++) { 172 (i < list->entries); i++) {
173 struct p54_channel_entry *chan = &list->channels[i];
148 174
149 if (list->channels[i].band != band) 175 if (chan->band != band)
150 continue; 176 continue;
151 177
152 if (list->channels[i].data != CHAN_HAS_ALL) { 178 if (chan->data != CHAN_HAS_ALL) {
153 wiphy_err(dev->wiphy, 179 wiphy_err(dev->wiphy, "%s%s%s is/are missing for "
154 "%s%s%s is/are missing for channel:%d [%d MHz].\n", 180 "channel:%d [%d MHz].\n",
155 (list->channels[i].data & CHAN_HAS_CAL ? "" : 181 (chan->data & CHAN_HAS_CAL ? "" :
156 " [iqauto calibration data]"), 182 " [iqauto calibration data]"),
157 (list->channels[i].data & CHAN_HAS_LIMIT ? "" : 183 (chan->data & CHAN_HAS_LIMIT ? "" :
158 " [output power limits]"), 184 " [output power limits]"),
159 (list->channels[i].data & CHAN_HAS_CURVE ? "" : 185 (chan->data & CHAN_HAS_CURVE ? "" :
160 " [curve data]"), 186 " [curve data]"),
161 list->channels[i].index, list->channels[i].freq); 187 chan->index, chan->freq);
162 continue; 188 continue;
163 } 189 }
164 190
165 tmp->channels[j].band = list->channels[i].band; 191 tmp->channels[j].band = chan->band;
166 tmp->channels[j].center_freq = list->channels[i].freq; 192 tmp->channels[j].center_freq = chan->freq;
167 j++; 193 j++;
168 } 194 }
169 195
@@ -291,7 +317,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
291 } 317 }
292 } 318 }
293 319
294 /* sort the list by the channel index */ 320 /* sort the channel list by frequency */
295 sort(list->channels, list->entries, sizeof(struct p54_channel_entry), 321 sort(list->channels, list->entries, sizeof(struct p54_channel_entry),
296 p54_compare_channels, NULL); 322 p54_compare_channels, NULL);
297 323
@@ -410,33 +436,118 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
410static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2", 436static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2",
411 "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" }; 437 "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" };
412 438
413static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len, 439static int p54_parse_rssical(struct ieee80211_hw *dev,
414 u16 type) 440 u8 *data, int len, u16 type)
415{ 441{
416 struct p54_common *priv = dev->priv; 442 struct p54_common *priv = dev->priv;
417 int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0; 443 struct p54_rssi_db_entry *entry;
418 int entry_size = sizeof(struct pda_rssi_cal_entry) + offset; 444 size_t db_len, entries;
419 int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2; 445 int offset = 0, i;
420 int i; 446
447 if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
448 entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
449 if (len != sizeof(struct pda_rssi_cal_entry) * entries) {
450 wiphy_err(dev->wiphy, "rssical size mismatch.\n");
451 goto err_data;
452 }
453 } else {
454 /*
455 * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...)
456 * have an empty two byte header.
457 */
458 if (*((__le16 *)&data[offset]) == cpu_to_le16(0))
459 offset += 2;
421 460
422 if (len != (entry_size * num_entries)) { 461 entries = (len - offset) /
423 wiphy_err(dev->wiphy, 462 sizeof(struct pda_rssi_cal_ext_entry);
424 "unknown rssi calibration data packing type:(%x) len:%d.\n",
425 type, len);
426 463
427 print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, 464 if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
428 data, len); 465 entries <= 0) {
466 wiphy_err(dev->wiphy, "invalid rssi database.\n");
467 goto err_data;
468 }
469 }
429 470
430 wiphy_err(dev->wiphy, "please report this issue.\n"); 471 db_len = sizeof(*entry) * entries;
431 return; 472 priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL);
473 if (!priv->rssi_db)
474 return -ENOMEM;
475
476 priv->rssi_db->offset = 0;
477 priv->rssi_db->entries = entries;
478 priv->rssi_db->entry_size = sizeof(*entry);
479 priv->rssi_db->len = db_len;
480
481 entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset);
482 if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) {
483 struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset];
484
485 for (i = 0; i < entries; i++) {
486 entry[i].freq = le16_to_cpu(cal[i].freq);
487 entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
488 entry[i].add = (s16) le16_to_cpu(cal[i].add);
489 }
490 } else {
491 struct pda_rssi_cal_entry *cal = (void *) &data[offset];
492
493 for (i = 0; i < entries; i++) {
494 u16 freq;
495 switch (i) {
496 case IEEE80211_BAND_2GHZ:
497 freq = 2437;
498 break;
499 case IEEE80211_BAND_5GHZ:
500 freq = 5240;
501 break;
502 }
503
504 entry[i].freq = freq;
505 entry[i].mul = (s16) le16_to_cpu(cal[i].mul);
506 entry[i].add = (s16) le16_to_cpu(cal[i].add);
507 }
432 } 508 }
433 509
434 for (i = 0; i < num_entries; i++) { 510 /* sort the list by channel frequency */
435 struct pda_rssi_cal_entry *cal = data + 511 sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL);
436 (offset + i * entry_size); 512 return 0;
437 priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul); 513
438 priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add); 514err_data:
515 wiphy_err(dev->wiphy,
516 "rssi calibration data packing type:(%x) len:%d.\n",
517 type, len);
518
519 print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len);
520
521 wiphy_err(dev->wiphy, "please report this issue.\n");
522 return -EINVAL;
523}
524
525struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq)
526{
527 struct p54_rssi_db_entry *entry = (void *)(priv->rssi_db->data +
528 priv->rssi_db->offset);
529 int i, found = -1;
530
531 for (i = 0; i < priv->rssi_db->entries; i++) {
532 if (!same_band(freq, entry[i].freq))
533 continue;
534
535 if (found == -1) {
536 found = i;
537 continue;
538 }
539
540 /* nearest match */
541 if (abs(freq - entry[i].freq) <
542 abs(freq - entry[found].freq)) {
543 found = i;
544 continue;
545 } else {
546 break;
547 }
439 } 548 }
549
550 return found < 0 ? &p54_rssi_default : &entry[found];
440} 551}
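p54_rssi_find() relies on the database being sorted by frequency: it walks the same-band entries, keeps the closest one seen so far, and stops as soon as the distance starts growing again. A usage sketch, assuming a hypothetical, already-sorted database and the surrounding p54_scan() context for priv and rssi:

/*
 * Database (sorted): 2412, 2437, 2462 MHz and 5180, 5240 MHz.
 *
 *   p54_rssi_find(priv, 2447) -> entry for 2437 (|2447-2437| < |2447-2462|)
 *   p54_rssi_find(priv, 5300) -> entry for 5240 (last in-band candidate)
 *   no same-band entry found  -> &p54_rssi_default (mul 130, add -398)
 */
struct p54_rssi_db_entry *cal = p54_rssi_find(priv, 2447);

rssi->mul = cpu_to_le16(cal->mul);	/* as done in p54_scan() */
rssi->add = cpu_to_le16(cal->add);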
441 552
442static void p54_parse_default_country(struct ieee80211_hw *dev, 553static void p54_parse_default_country(struct ieee80211_hw *dev,
@@ -627,21 +738,30 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
627 case PDR_RSSI_LINEAR_APPROXIMATION: 738 case PDR_RSSI_LINEAR_APPROXIMATION:
628 case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND: 739 case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
629 case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED: 740 case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
630 p54_parse_rssical(dev, entry->data, data_len, 741 err = p54_parse_rssical(dev, entry->data, data_len,
631 le16_to_cpu(entry->code)); 742 le16_to_cpu(entry->code));
743 if (err)
744 goto err;
632 break; 745 break;
633 case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: { 746 case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: {
634 __le16 *src = (void *) entry->data; 747 struct pda_custom_wrapper *pda = (void *) entry->data;
635 s16 *dst = (void *) &priv->rssical_db; 748 __le16 *src;
749 u16 *dst;
636 int i; 750 int i;
637 751
638 if (data_len != sizeof(priv->rssical_db)) { 752 if (priv->rssi_db || data_len < sizeof(*pda))
639 err = -EINVAL; 753 break;
640 goto err; 754
641 } 755 priv->rssi_db = p54_convert_db(pda, data_len);
642 for (i = 0; i < sizeof(priv->rssical_db) / 756 if (!priv->rssi_db)
643 sizeof(*src); i++) 757 break;
758
759 src = (void *) priv->rssi_db->data;
760 dst = (void *) priv->rssi_db->data;
761
762 for (i = 0; i < priv->rssi_db->entries; i++)
644 *(dst++) = (s16) le16_to_cpu(*(src++)); 763 *(dst++) = (s16) le16_to_cpu(*(src++));
764
645 } 765 }
646 break; 766 break;
647 case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: { 767 case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: {
@@ -717,6 +837,8 @@ good_eeprom:
717 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 837 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
718 } 838 }
719 839
840 priv->cur_rssi = &p54_rssi_default;
841
720 wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", 842 wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
721 dev->wiphy->perm_addr, priv->version, 843 dev->wiphy->perm_addr, priv->version,
722 p54_rf_chips[priv->rxhw]); 844 p54_rf_chips[priv->rxhw]);
@@ -727,9 +849,11 @@ err:
727 kfree(priv->iq_autocal); 849 kfree(priv->iq_autocal);
728 kfree(priv->output_limit); 850 kfree(priv->output_limit);
729 kfree(priv->curve_data); 851 kfree(priv->curve_data);
852 kfree(priv->rssi_db);
730 priv->iq_autocal = NULL; 853 priv->iq_autocal = NULL;
731 priv->output_limit = NULL; 854 priv->output_limit = NULL;
732 priv->curve_data = NULL; 855 priv->curve_data = NULL;
856 priv->rssi_db = NULL;
733 857
734 wiphy_err(dev->wiphy, "eeprom parse failed!\n"); 858 wiphy_err(dev->wiphy, "eeprom parse failed!\n");
735 return err; 859 return err;
diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h
index 9051aef1124..afde72b8460 100644
--- a/drivers/net/wireless/p54/eeprom.h
+++ b/drivers/net/wireless/p54/eeprom.h
@@ -81,6 +81,12 @@ struct pda_pa_curve_data {
81 u8 data[0]; 81 u8 data[0];
82} __packed; 82} __packed;
83 83
84struct pda_rssi_cal_ext_entry {
85 __le16 freq;
86 __le16 mul;
87 __le16 add;
88} __packed;
89
84struct pda_rssi_cal_entry { 90struct pda_rssi_cal_entry {
85 __le16 mul; 91 __le16 mul;
86 __le16 add; 92 __le16 add;
@@ -179,6 +185,7 @@ struct pda_custom_wrapper {
179 185
 180/* used by our modified eeprom image */ 186/* used by our modified eeprom image */
181#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD 187#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD
188#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 0xCAFF
182#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF 189#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF
183#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D 190#define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D
184 191
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 92b9b1f05fd..0d3d108f6fe 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -397,9 +397,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
397 union p54_scan_body_union *body; 397 union p54_scan_body_union *body;
398 struct p54_scan_tail_rate *rate; 398 struct p54_scan_tail_rate *rate;
399 struct pda_rssi_cal_entry *rssi; 399 struct pda_rssi_cal_entry *rssi;
400 struct p54_rssi_db_entry *rssi_data;
400 unsigned int i; 401 unsigned int i;
401 void *entry; 402 void *entry;
402 int band = priv->hw->conf.channel->band;
403 __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq); 403 __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq);
404 404
405 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) + 405 skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) +
@@ -503,13 +503,14 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
503 } 503 }
504 504
505 rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi)); 505 rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi));
506 rssi->mul = cpu_to_le16(priv->rssical_db[band].mul); 506 rssi_data = p54_rssi_find(priv, le16_to_cpu(freq));
507 rssi->add = cpu_to_le16(priv->rssical_db[band].add); 507 rssi->mul = cpu_to_le16(rssi_data->mul);
508 rssi->add = cpu_to_le16(rssi_data->add);
508 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { 509 if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
509 /* Longbow frontend needs ever more */ 510 /* Longbow frontend needs ever more */
510 rssi = (void *) skb_put(skb, sizeof(*rssi)); 511 rssi = (void *) skb_put(skb, sizeof(*rssi));
511 rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn); 512 rssi->mul = cpu_to_le16(rssi_data->longbow_unkn);
512 rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2); 513 rssi->add = cpu_to_le16(rssi_data->longbow_unk2);
513 } 514 }
514 515
515 if (priv->fw_var >= 0x509) { 516 if (priv->fw_var >= 0x509) {
@@ -523,6 +524,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
523 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); 524 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr));
524 525
525 p54_tx(priv, skb); 526 p54_tx(priv, skb);
527 priv->cur_rssi = rssi_data;
526 return 0; 528 return 0;
527 529
528err: 530err:
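
p54_scan() now resolves the calibration record through p54_rssi_find() using the channel's centre frequency instead of indexing a per-band table. The lookup itself is not part of this hunk; presumably it walks the frequency-keyed database and returns the best-matching entry. A rough user-space sketch of such a nearest-frequency search, with made-up table contents and names:

    #include <stdio.h>
    #include <stdlib.h>

    struct rssi_db_entry {
            unsigned short freq;     /* MHz */
            short mul;
            short add;
    };

    /* Return the entry whose frequency is closest to the requested one. */
    static const struct rssi_db_entry *
    rssi_find(const struct rssi_db_entry *db, size_t n, unsigned short freq)
    {
            const struct rssi_db_entry *best = &db[0];
            size_t i;

            for (i = 1; i < n; i++)
                    if (abs((int) db[i].freq - freq) < abs((int) best->freq - freq))
                            best = &db[i];
            return best;
    }

    int main(void)
    {
            static const struct rssi_db_entry db[] = {
                    { 2412, 102, -92 }, { 2462, 100, -90 }, { 5180, 94, -88 },
            };
            const struct rssi_db_entry *e = rssi_find(db, 3, 2437);

            printf("freq=%u mul=%d add=%d\n", e->freq, e->mul, e->add);
            return 0;
    }
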
diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h
index 04b63ec80fa..5ca117e6f95 100644
--- a/drivers/net/wireless/p54/lmac.h
+++ b/drivers/net/wireless/p54/lmac.h
@@ -551,6 +551,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot,
551/* eeprom */ 551/* eeprom */
552int p54_download_eeprom(struct p54_common *priv, void *buf, 552int p54_download_eeprom(struct p54_common *priv, void *buf,
553 u16 offset, u16 len); 553 u16 offset, u16 len);
554struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq);
554 555
555/* utility */ 556/* utility */
556u8 *p54_find_ie(struct sk_buff *skb, u8 ie); 557u8 *p54_find_ie(struct sk_buff *skb, u8 ie);
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 622d27b6d8f..e14a05bbc48 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -524,6 +524,48 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
524 return 0; 524 return 0;
525} 525}
526 526
527static unsigned int p54_flush_count(struct p54_common *priv)
528{
529 unsigned int total = 0, i;
530
531 BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats));
532
533 /*
534 * Because the firmware has the sole control over any frames
535 * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they
536 * don't really count as pending or active.
537 */
538 for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++)
539 total += priv->tx_stats[i].len;
540 return total;
541}
542
543static void p54_flush(struct ieee80211_hw *dev, bool drop)
544{
545 struct p54_common *priv = dev->priv;
546 unsigned int total, i;
547
548 /*
549 * Currently, it wouldn't really matter if we wait for one second
550 * or 15 minutes. But once someone gets around to completing the
551 * TODOs [cancel stuck frames / reset device] in p54_work, it will
552 * suddenly make sense to wait that long.
553 */
554 i = P54_STATISTICS_UPDATE * 2 / 20;
555
556 /*
557 * In this case no locking is required because as we speak the
558 * queues have already been stopped and no new frames can sneak
559 * up from behind.
560 */
561 while ((total = p54_flush_count(priv)) && i--) {
562 /* waste time */
563 msleep(20);
564 }
565
566 WARN(total, "tx flush timeout, unresponsive firmware");
567}
568
527static const struct ieee80211_ops p54_ops = { 569static const struct ieee80211_ops p54_ops = {
528 .tx = p54_tx_80211, 570 .tx = p54_tx_80211,
529 .start = p54_start, 571 .start = p54_start,
@@ -536,6 +578,7 @@ static const struct ieee80211_ops p54_ops = {
536 .sta_remove = p54_sta_add_remove, 578 .sta_remove = p54_sta_add_remove,
537 .set_key = p54_set_key, 579 .set_key = p54_set_key,
538 .config = p54_config, 580 .config = p54_config,
581 .flush = p54_flush,
539 .bss_info_changed = p54_bss_info_changed, 582 .bss_info_changed = p54_bss_info_changed,
540 .configure_filter = p54_configure_filter, 583 .configure_filter = p54_configure_filter,
541 .conf_tx = p54_conf_tx, 584 .conf_tx = p54_conf_tx,
@@ -611,7 +654,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
611 654
612int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) 655int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
613{ 656{
614 struct p54_common *priv = dev->priv; 657 struct p54_common __maybe_unused *priv = dev->priv;
615 int err; 658 int err;
616 659
617 err = ieee80211_register_hw(dev); 660 err = ieee80211_register_hw(dev);
@@ -642,10 +685,12 @@ void p54_free_common(struct ieee80211_hw *dev)
642 kfree(priv->iq_autocal); 685 kfree(priv->iq_autocal);
643 kfree(priv->output_limit); 686 kfree(priv->output_limit);
644 kfree(priv->curve_data); 687 kfree(priv->curve_data);
688 kfree(priv->rssi_db);
645 kfree(priv->used_rxkeys); 689 kfree(priv->used_rxkeys);
646 priv->iq_autocal = NULL; 690 priv->iq_autocal = NULL;
647 priv->output_limit = NULL; 691 priv->output_limit = NULL;
648 priv->curve_data = NULL; 692 priv->curve_data = NULL;
693 priv->rssi_db = NULL;
649 priv->used_rxkeys = NULL; 694 priv->used_rxkeys = NULL;
650 ieee80211_free_hw(dev); 695 ieee80211_free_hw(dev);
651} 696}
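
The new .flush callback above is a bounded polling loop: it re-reads the pending-frame count every 20 ms until it hits zero or roughly one statistics interval has elapsed, then warns if frames are still stuck. The same shape as a stand-alone user-space sketch (poll_pending() and the timings are stand-ins, not driver code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>   /* usleep() */

    /* Stand-in for p54_flush_count(): pretend the queues drain over time. */
    static unsigned int pending = 5;
    static unsigned int poll_pending(void)
    {
            if (pending)
                    pending--;
            return pending;
    }

    /* Wait until nothing is pending or the retry budget runs out. */
    static bool flush_wait(unsigned int retries)
    {
            unsigned int total;

            while ((total = poll_pending()) && retries--)
                    usleep(20 * 1000);   /* 20 ms per attempt, as in p54_flush() */

            return total == 0;
    }

    int main(void)
    {
            printf("flushed: %s\n", flush_wait(100) ? "yes" : "timeout");
            return 0;
    }
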
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 43a3b2ead81..f951c8f3186 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -116,7 +116,8 @@ struct p54_edcf_queue_param {
116 __le16 txop; 116 __le16 txop;
117} __packed; 117} __packed;
118 118
119struct p54_rssi_linear_approximation { 119struct p54_rssi_db_entry {
120 u16 freq;
120 s16 mul; 121 s16 mul;
121 s16 add; 122 s16 add;
122 s16 longbow_unkn; 123 s16 longbow_unkn;
@@ -197,13 +198,14 @@ struct p54_common {
197 u8 rx_diversity_mask; 198 u8 rx_diversity_mask;
198 u8 tx_diversity_mask; 199 u8 tx_diversity_mask;
199 unsigned int output_power; 200 unsigned int output_power;
201 struct p54_rssi_db_entry *cur_rssi;
200 int noise; 202 int noise;
201 /* calibration, output power limit and rssi<->dBm conversion data */ 203 /* calibration, output power limit and rssi<->dBm conversion data */
202 struct pda_iq_autocal_entry *iq_autocal; 204 struct pda_iq_autocal_entry *iq_autocal;
203 unsigned int iq_autocal_len; 205 unsigned int iq_autocal_len;
204 struct p54_cal_database *curve_data; 206 struct p54_cal_database *curve_data;
205 struct p54_cal_database *output_limit; 207 struct p54_cal_database *output_limit;
206 struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS]; 208 struct p54_cal_database *rssi_db;
207 struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS]; 209 struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
208 210
209 /* BBP/MAC state */ 211 /* BBP/MAC state */
diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h
index d592cbd34d7..0b7bfb0adcf 100644
--- a/drivers/net/wireless/p54/p54spi_eeprom.h
+++ b/drivers/net/wireless/p54/p54spi_eeprom.h
@@ -65,9 +65,10 @@ static unsigned char p54spi_eeprom[] = {
650x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */ 650x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */
66 0x08, 0x08, 0x08, 0x08, 66 0x08, 0x08, 0x08, 0x08,
67 67
680x09, 0x00, 0xad, 0xde, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */ 680x0a, 0x00, 0xff, 0xca, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 */
69 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 69 0x01, 0x00, 0x0a, 0x00,
70 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 70 0x00, 0x00, 0x0a, 0x00,
71 0x85, 0x09, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00,
71 72
72/* struct pda_custom_wrapper */ 73/* struct pda_custom_wrapper */
730x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */ 740x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */
@@ -671,7 +672,7 @@ static unsigned char p54spi_eeprom[] = {
671 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, 672 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
672 673
6730x02, 0x00, 0x00, 0x00, /* PDR_END */ 6740x02, 0x00, 0x00, 0x00, /* PDR_END */
674 0x67, 0x99, 675 0xb6, 0x04,
675}; 676};
676 677
677#endif /* P54SPI_EEPROM_H */ 678#endif /* P54SPI_EEPROM_H */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index f618b9623e5..917d5d948e3 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -273,11 +273,9 @@ void p54_tx(struct p54_common *priv, struct sk_buff *skb)
273 273
274static int p54_rssi_to_dbm(struct p54_common *priv, int rssi) 274static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
275{ 275{
276 int band = priv->hw->conf.channel->band;
277
278 if (priv->rxhw != 5) { 276 if (priv->rxhw != 5) {
279 return ((rssi * priv->rssical_db[band].mul) / 64 + 277 return ((rssi * priv->cur_rssi->mul) / 64 +
280 priv->rssical_db[band].add) / 4; 278 priv->cur_rssi->add) / 4;
281 } else { 279 } else {
282 /* 280 /*
283 * TODO: find the correct formula 281 * TODO: find the correct formula
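
With the per-band table gone, p54_rssi_to_dbm() applies the linear calibration of whatever entry was last cached in priv->cur_rssi: dBm = (rssi * mul / 64 + add) / 4, in integer arithmetic. A tiny worked example of that conversion (only the formula comes from the code above; the mul/add values are invented):

    #include <stdio.h>

    /* p54 linear rssi -> dBm approximation from txrx.c:
     * dbm = (rssi * mul / 64 + add) / 4 */
    static int rssi_to_dbm(int rssi, int mul, int add)
    {
            return (rssi * mul / 64 + add) / 4;
    }

    int main(void)
    {
            /* e.g. mul = 100, add = -400: rssi 64 -> (100 - 400) / 4 = -75 dBm */
            printf("%d dBm\n", rssi_to_dbm(64, 100, -400));
            return 0;
    }
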
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 6f383cd684b..f630552427b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -97,6 +97,18 @@ config RT2800PCI_RT35XX
97 Support for these devices is non-functional at the moment and is 97 Support for these devices is non-functional at the moment and is
98 intended for testers and developers. 98 intended for testers and developers.
99 99
100config RT2800PCI_RT53XX
101 bool "rt2800-pci - Include support for rt53xx devices (EXPERIMENTAL)"
102 depends on EXPERIMENTAL
103 default n
104 ---help---
105 This adds support for the rt53xx wireless chipset family to the
106 rt2800pci driver.
107 Supported chips: RT5390
108
109 Support for these devices is non-functional at the moment and is
110 intended for testers and developers.
111
100endif 112endif
101 113
102config RT2500USB 114config RT2500USB
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 54ca49ad347..2725f3c4442 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -46,7 +46,7 @@
46 * These indirect registers work with busy bits, 46 * These indirect registers work with busy bits,
47 * and we will try maximal REGISTER_BUSY_COUNT times to access 47 * and we will try maximal REGISTER_BUSY_COUNT times to access
48 * the register while taking a REGISTER_BUSY_DELAY us delay 48 * the register while taking a REGISTER_BUSY_DELAY us delay
49 * between each attampt. When the busy bit is still set at that time, 49 * between each attempt. When the busy bit is still set at that time,
50 * the access attempt is considered to have failed, 50 * the access attempt is considered to have failed,
51 * and we will print an error. 51 * and we will print an error.
52 */ 52 */
@@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
305 * Enable synchronisation. 305 * Enable synchronisation.
306 */ 306 */
307 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 307 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
308 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
309 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); 308 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
310 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
311 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 309 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
312 } 310 }
313 311
@@ -647,6 +645,11 @@ static void rt2400pci_start_queue(struct data_queue *queue)
647 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 645 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
648 break; 646 break;
649 case QID_BEACON: 647 case QID_BEACON:
648 /*
649 * Allow the tbtt tasklet to be scheduled.
650 */
651 tasklet_enable(&rt2x00dev->tbtt_tasklet);
652
650 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 653 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
651 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 654 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
652 rt2x00_set_field32(&reg, CSR14_TBCN, 1); 655 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -708,6 +711,11 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
708 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 711 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
709 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 712 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
710 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 713 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
714
715 /*
716 * Wait for possibly running tbtt tasklets.
717 */
718 tasklet_disable(&rt2x00dev->tbtt_tasklet);
711 break; 719 break;
712 default: 720 default:
713 break; 721 break;
@@ -963,9 +971,9 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
963static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 971static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
964 enum dev_state state) 972 enum dev_state state)
965{ 973{
966 int mask = (state == STATE_RADIO_IRQ_OFF) || 974 int mask = (state == STATE_RADIO_IRQ_OFF);
967 (state == STATE_RADIO_IRQ_OFF_ISR);
968 u32 reg; 975 u32 reg;
976 unsigned long flags;
969 977
970 /* 978 /*
971 * When interrupts are being enabled, the interrupt registers 979 * When interrupts are being enabled, the interrupt registers
@@ -974,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
974 if (state == STATE_RADIO_IRQ_ON) { 982 if (state == STATE_RADIO_IRQ_ON) {
975 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 983 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
976 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 984 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
985
986 /*
987 * Enable tasklets.
988 */
989 tasklet_enable(&rt2x00dev->txstatus_tasklet);
990 tasklet_enable(&rt2x00dev->rxdone_tasklet);
977 } 991 }
978 992
979 /* 993 /*
980 * Only toggle the interrupts bits we are going to use. 994 * Only toggle the interrupts bits we are going to use.
981 * Non-checked interrupt bits are disabled by default. 995 * Non-checked interrupt bits are disabled by default.
982 */ 996 */
997 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
998
983 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 999 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
984 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); 1000 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
985 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); 1001 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -987,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
987 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); 1003 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
988 rt2x00_set_field32(&reg, CSR8_RXDONE, mask); 1004 rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
989 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1005 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1006
1007 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1008
1009 if (state == STATE_RADIO_IRQ_OFF) {
1010 /*
1011 * Ensure that all tasklets are finished before
1012 * disabling the interrupts.
1013 */
1014 tasklet_disable(&rt2x00dev->txstatus_tasklet);
1015 tasklet_disable(&rt2x00dev->rxdone_tasklet);
1016 }
990} 1017}
991 1018
992static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev) 1019static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1059,9 +1086,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1059 rt2400pci_disable_radio(rt2x00dev); 1086 rt2400pci_disable_radio(rt2x00dev);
1060 break; 1087 break;
1061 case STATE_RADIO_IRQ_ON: 1088 case STATE_RADIO_IRQ_ON:
1062 case STATE_RADIO_IRQ_ON_ISR:
1063 case STATE_RADIO_IRQ_OFF: 1089 case STATE_RADIO_IRQ_OFF:
1064 case STATE_RADIO_IRQ_OFF_ISR:
1065 rt2400pci_toggle_irq(rt2x00dev, state); 1090 rt2400pci_toggle_irq(rt2x00dev, state);
1066 break; 1091 break;
1067 case STATE_DEEP_SLEEP: 1092 case STATE_DEEP_SLEEP:
@@ -1183,8 +1208,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1183 /* 1208 /*
1184 * Enable beaconing again. 1209 * Enable beaconing again.
1185 */ 1210 */
1186 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1187 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1188 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1211 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1189 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1212 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1190} 1213}
@@ -1289,57 +1312,71 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1289 } 1312 }
1290} 1313}
1291 1314
1292static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance) 1315static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1316 struct rt2x00_field32 irq_field)
1293{ 1317{
1294 struct rt2x00_dev *rt2x00dev = dev_instance; 1318 unsigned long flags;
1295 u32 reg = rt2x00dev->irqvalue[0]; 1319 u32 reg;
1296 1320
1297 /* 1321 /*
1298 * Handle interrupts, walk through all bits 1322 * Enable a single interrupt. The interrupt mask register
1299 * and run the tasks, the bits are checked in order of 1323 * access needs locking.
1300 * priority.
1301 */ 1324 */
1325 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1302 1326
1303 /* 1327 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1304 * 1 - Beacon timer expired interrupt. 1328 rt2x00_set_field32(&reg, irq_field, 0);
1305 */ 1329 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1306 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1307 rt2x00lib_beacondone(rt2x00dev);
1308 1330
1309 /* 1331 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1310 * 2 - Rx ring done interrupt. 1332}
1311 */
1312 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1313 rt2x00pci_rxdone(rt2x00dev);
1314 1333
1315 /* 1334static void rt2400pci_txstatus_tasklet(unsigned long data)
1316 * 3 - Atim ring transmit done interrupt. 1335{
1317 */ 1336 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1318 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1337 u32 reg;
1319 rt2400pci_txdone(rt2x00dev, QID_ATIM); 1338 unsigned long flags;
1320 1339
1321 /* 1340 /*
1322 * 4 - Priority ring transmit done interrupt. 1341 * Handle all tx queues.
1323 */ 1342 */
1324 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1343 rt2400pci_txdone(rt2x00dev, QID_ATIM);
1325 rt2400pci_txdone(rt2x00dev, QID_AC_VO); 1344 rt2400pci_txdone(rt2x00dev, QID_AC_VO);
1345 rt2400pci_txdone(rt2x00dev, QID_AC_VI);
1326 1346
1327 /* 1347 /*
1328 * 5 - Tx ring transmit done interrupt. 1348 * Enable all TXDONE interrupts again.
1329 */ 1349 */
1330 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1350 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1331 rt2400pci_txdone(rt2x00dev, QID_AC_VI);
1332 1351
1333 /* Enable interrupts again. */ 1352 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1334 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1353 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
1335 STATE_RADIO_IRQ_ON_ISR); 1354 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
1336 return IRQ_HANDLED; 1355 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
1356 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1357
1358 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1359}
1360
1361static void rt2400pci_tbtt_tasklet(unsigned long data)
1362{
1363 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1364 rt2x00lib_beacondone(rt2x00dev);
1365 rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
1366}
1367
1368static void rt2400pci_rxdone_tasklet(unsigned long data)
1369{
1370 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1371 rt2x00pci_rxdone(rt2x00dev);
1372 rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
1337} 1373}
1338 1374
1339static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) 1375static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1340{ 1376{
1341 struct rt2x00_dev *rt2x00dev = dev_instance; 1377 struct rt2x00_dev *rt2x00dev = dev_instance;
1342 u32 reg; 1378 u32 reg, mask;
1379 unsigned long flags;
1343 1380
1344 /* 1381 /*
1345 * Get the interrupt sources & save them to a local variable. 1382 * Get the interrupt sources & save them to a local variable.
@@ -1354,14 +1391,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1354 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 1391 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1355 return IRQ_HANDLED; 1392 return IRQ_HANDLED;
1356 1393
1357 /* Store irqvalues for use in the interrupt thread. */ 1394 mask = reg;
1358 rt2x00dev->irqvalue[0] = reg;
1359 1395
1360 /* Disable interrupts, will be enabled again in the interrupt thread. */ 1396 /*
1361 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1397 * Schedule tasklets for interrupt handling.
1362 STATE_RADIO_IRQ_OFF_ISR); 1398 */
1399 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1400 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
1363 1401
1364 return IRQ_WAKE_THREAD; 1402 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1403 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1404
1405 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
1406 rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
1407 rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
1408 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
1409 /*
1410 * Mask out all txdone interrupts.
1411 */
1412 rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
1413 rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
1414 rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
1415 }
1416
1417 /*
1418 * Disable all interrupts for which a tasklet was scheduled right now,
1419 * the tasklet will reenable the appropriate interrupts.
1420 */
1421 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1422
1423 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1424 reg |= mask;
1425 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1426
1427 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1428
1429
1430
1431 return IRQ_HANDLED;
1365} 1432}
1366 1433
1367/* 1434/*
@@ -1655,7 +1722,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1655 1722
1656static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { 1723static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1657 .irq_handler = rt2400pci_interrupt, 1724 .irq_handler = rt2400pci_interrupt,
1658 .irq_handler_thread = rt2400pci_interrupt_thread, 1725 .txstatus_tasklet = rt2400pci_txstatus_tasklet,
1726 .tbtt_tasklet = rt2400pci_tbtt_tasklet,
1727 .rxdone_tasklet = rt2400pci_rxdone_tasklet,
1659 .probe_hw = rt2400pci_probe_hw, 1728 .probe_hw = rt2400pci_probe_hw,
1660 .initialize = rt2x00pci_initialize, 1729 .initialize = rt2x00pci_initialize,
1661 .uninitialize = rt2x00pci_uninitialize, 1730 .uninitialize = rt2x00pci_uninitialize,
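
The rt2400pci conversion above replaces the threaded IRQ handler with tasklets: the hard handler reads CSR7, schedules the matching tasklet, and masks the deferred sources in CSR8 under irqmask_lock; each tasklet re-enables only its own sources once it has run. A stand-alone sketch of that mask/defer/unmask bookkeeping, with a plain 32-bit variable and a pthread mutex standing in for the CSR8 register and the spinlock (all names illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IRQ_RXDONE  (1u << 0)
    #define IRQ_TXDONE  (1u << 1)

    static uint32_t irq_mask;        /* 1 = source disabled, like CSR8 */
    static pthread_mutex_t mask_lock = PTHREAD_MUTEX_INITIALIZER;

    /* "Hard" handler: mask the sources whose work is deferred. */
    static void isr(uint32_t status)
    {
            pthread_mutex_lock(&mask_lock);
            irq_mask |= status;      /* keep them quiet while deferred work runs */
            pthread_mutex_unlock(&mask_lock);
    }

    /* Deferred handler: do the work, then re-enable only its own source. */
    static void rxdone_tasklet(void)
    {
            /* ... process received frames ... */
            pthread_mutex_lock(&mask_lock);
            irq_mask &= ~IRQ_RXDONE;
            pthread_mutex_unlock(&mask_lock);
    }

    int main(void)
    {
            isr(IRQ_RXDONE | IRQ_TXDONE);
            printf("mask after isr:     0x%02x\n", (unsigned) irq_mask);
            rxdone_tasklet();
            printf("mask after tasklet: 0x%02x\n", (unsigned) irq_mask);   /* TXDONE still masked */
            return 0;
    }
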
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index a9ff26a2772..3ef1fb4185c 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
311 * Enable synchronisation. 311 * Enable synchronisation.
312 */ 312 */
313 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 313 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
314 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
315 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); 314 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
316 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
317 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 315 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
318 } 316 }
319 317
@@ -737,6 +735,11 @@ static void rt2500pci_start_queue(struct data_queue *queue)
737 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); 735 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
738 break; 736 break;
739 case QID_BEACON: 737 case QID_BEACON:
738 /*
739 * Allow the tbtt tasklet to be scheduled.
740 */
741 tasklet_enable(&rt2x00dev->tbtt_tasklet);
742
740 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 743 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
741 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 744 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
742 rt2x00_set_field32(&reg, CSR14_TBCN, 1); 745 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -798,6 +801,11 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
798 rt2x00_set_field32(&reg, CSR14_TBCN, 0); 801 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
799 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 802 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
800 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 803 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
804
805 /*
806 * Wait for possibly running tbtt tasklets.
807 */
808 tasklet_disable(&rt2x00dev->tbtt_tasklet);
801 break; 809 break;
802 default: 810 default:
803 break; 811 break;
@@ -1118,9 +1126,9 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
1118static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1126static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1119 enum dev_state state) 1127 enum dev_state state)
1120{ 1128{
1121 int mask = (state == STATE_RADIO_IRQ_OFF) || 1129 int mask = (state == STATE_RADIO_IRQ_OFF);
1122 (state == STATE_RADIO_IRQ_OFF_ISR);
1123 u32 reg; 1130 u32 reg;
1131 unsigned long flags;
1124 1132
1125 /* 1133 /*
1126 * When interrupts are being enabled, the interrupt registers 1134 * When interrupts are being enabled, the interrupt registers
@@ -1129,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1129 if (state == STATE_RADIO_IRQ_ON) { 1137 if (state == STATE_RADIO_IRQ_ON) {
1130 rt2x00pci_register_read(rt2x00dev, CSR7, &reg); 1138 rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
1131 rt2x00pci_register_write(rt2x00dev, CSR7, reg); 1139 rt2x00pci_register_write(rt2x00dev, CSR7, reg);
1140
1141 /*
1142 * Enable tasklets.
1143 */
1144 tasklet_enable(&rt2x00dev->txstatus_tasklet);
1145 tasklet_enable(&rt2x00dev->rxdone_tasklet);
1132 } 1146 }
1133 1147
1134 /* 1148 /*
1135 * Only toggle the interrupts bits we are going to use. 1149 * Only toggle the interrupts bits we are going to use.
1136 * Non-checked interrupt bits are disabled by default. 1150 * Non-checked interrupt bits are disabled by default.
1137 */ 1151 */
1152 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1153
1138 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1154 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1139 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); 1155 rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
1140 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); 1156 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -1142,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1142 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); 1158 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
1143 rt2x00_set_field32(&reg, CSR8_RXDONE, mask); 1159 rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
1144 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1160 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1161
1162 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1163
1164 if (state == STATE_RADIO_IRQ_OFF) {
1165 /*
1166 * Ensure that all tasklets are finished.
1167 */
1168 tasklet_disable(&rt2x00dev->txstatus_tasklet);
1169 tasklet_disable(&rt2x00dev->rxdone_tasklet);
1170 }
1145} 1171}
1146 1172
1147static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev) 1173static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1214,9 +1240,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1214 rt2500pci_disable_radio(rt2x00dev); 1240 rt2500pci_disable_radio(rt2x00dev);
1215 break; 1241 break;
1216 case STATE_RADIO_IRQ_ON: 1242 case STATE_RADIO_IRQ_ON:
1217 case STATE_RADIO_IRQ_ON_ISR:
1218 case STATE_RADIO_IRQ_OFF: 1243 case STATE_RADIO_IRQ_OFF:
1219 case STATE_RADIO_IRQ_OFF_ISR:
1220 rt2500pci_toggle_irq(rt2x00dev, state); 1244 rt2500pci_toggle_irq(rt2x00dev, state);
1221 break; 1245 break;
1222 case STATE_DEEP_SLEEP: 1246 case STATE_DEEP_SLEEP:
@@ -1337,8 +1361,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1337 /* 1361 /*
1338 * Enable beaconing again. 1362 * Enable beaconing again.
1339 */ 1363 */
1340 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1341 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1342 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1364 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1343 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1365 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1344} 1366}
@@ -1422,58 +1444,71 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1422 } 1444 }
1423} 1445}
1424 1446
1425static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance) 1447static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
1448 struct rt2x00_field32 irq_field)
1426{ 1449{
1427 struct rt2x00_dev *rt2x00dev = dev_instance; 1450 unsigned long flags;
1428 u32 reg = rt2x00dev->irqvalue[0]; 1451 u32 reg;
1429 1452
1430 /* 1453 /*
1431 * Handle interrupts, walk through all bits 1454 * Enable a single interrupt. The interrupt mask register
1432 * and run the tasks, the bits are checked in order of 1455 * access needs locking.
1433 * priority.
1434 */ 1456 */
1457 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1435 1458
1436 /* 1459 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1437 * 1 - Beacon timer expired interrupt. 1460 rt2x00_set_field32(&reg, irq_field, 0);
1438 */ 1461 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1439 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1440 rt2x00lib_beacondone(rt2x00dev);
1441 1462
1442 /* 1463 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1443 * 2 - Rx ring done interrupt. 1464}
1444 */
1445 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1446 rt2x00pci_rxdone(rt2x00dev);
1447 1465
1448 /* 1466static void rt2500pci_txstatus_tasklet(unsigned long data)
1449 * 3 - Atim ring transmit done interrupt. 1467{
1450 */ 1468 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1451 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1469 u32 reg;
1452 rt2500pci_txdone(rt2x00dev, QID_ATIM); 1470 unsigned long flags;
1453 1471
1454 /* 1472 /*
1455 * 4 - Priority ring transmit done interrupt. 1473 * Handle all tx queues.
1456 */ 1474 */
1457 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1475 rt2500pci_txdone(rt2x00dev, QID_ATIM);
1458 rt2500pci_txdone(rt2x00dev, QID_AC_VO); 1476 rt2500pci_txdone(rt2x00dev, QID_AC_VO);
1477 rt2500pci_txdone(rt2x00dev, QID_AC_VI);
1459 1478
1460 /* 1479 /*
1461 * 5 - Tx ring transmit done interrupt. 1480 * Enable all TXDONE interrupts again.
1462 */ 1481 */
1463 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1482 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1464 rt2500pci_txdone(rt2x00dev, QID_AC_VI); 1483
1484 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1485 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
1486 rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
1487 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
1488 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1465 1489
1466 /* Enable interrupts again. */ 1490 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1467 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1491}
1468 STATE_RADIO_IRQ_ON_ISR);
1469 1492
1470 return IRQ_HANDLED; 1493static void rt2500pci_tbtt_tasklet(unsigned long data)
1494{
1495 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1496 rt2x00lib_beacondone(rt2x00dev);
1497 rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
1498}
1499
1500static void rt2500pci_rxdone_tasklet(unsigned long data)
1501{
1502 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
1503 rt2x00pci_rxdone(rt2x00dev);
1504 rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
1471} 1505}
1472 1506
1473static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) 1507static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1474{ 1508{
1475 struct rt2x00_dev *rt2x00dev = dev_instance; 1509 struct rt2x00_dev *rt2x00dev = dev_instance;
1476 u32 reg; 1510 u32 reg, mask;
1511 unsigned long flags;
1477 1512
1478 /* 1513 /*
1479 * Get the interrupt sources & save them to a local variable. 1514 * Get the interrupt sources & save them to a local variable.
@@ -1488,14 +1523,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1488 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 1523 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1489 return IRQ_HANDLED; 1524 return IRQ_HANDLED;
1490 1525
1491 /* Store irqvalues for use in the interrupt thread. */ 1526 mask = reg;
1492 rt2x00dev->irqvalue[0] = reg;
1493 1527
1494 /* Disable interrupts, will be enabled again in the interrupt thread. */ 1528 /*
1495 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1529 * Schedule tasklets for interrupt handling.
1496 STATE_RADIO_IRQ_OFF_ISR); 1530 */
1531 if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
1532 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
1497 1533
1498 return IRQ_WAKE_THREAD; 1534 if (rt2x00_get_field32(reg, CSR7_RXDONE))
1535 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
1536
1537 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
1538 rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
1539 rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
1540 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
1541 /*
1542 * Mask out all txdone interrupts.
1543 */
1544 rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
1545 rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
1546 rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
1547 }
1548
1549 /*
1550 * Disable all interrupts for which a tasklet was scheduled right now,
1551 * the tasklet will reenable the appropriate interrupts.
1552 */
1553 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1554
1555 rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
1556 reg |= mask;
1557 rt2x00pci_register_write(rt2x00dev, CSR8, reg);
1558
1559 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1560
1561 return IRQ_HANDLED;
1499} 1562}
1500 1563
1501/* 1564/*
@@ -1952,7 +2015,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1952 2015
1953static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { 2016static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1954 .irq_handler = rt2500pci_interrupt, 2017 .irq_handler = rt2500pci_interrupt,
1955 .irq_handler_thread = rt2500pci_interrupt_thread, 2018 .txstatus_tasklet = rt2500pci_txstatus_tasklet,
2019 .tbtt_tasklet = rt2500pci_tbtt_tasklet,
2020 .rxdone_tasklet = rt2500pci_rxdone_tasklet,
1956 .probe_hw = rt2500pci_probe_hw, 2021 .probe_hw = rt2500pci_probe_hw,
1957 .initialize = rt2x00pci_initialize, 2022 .initialize = rt2x00pci_initialize,
1958 .uninitialize = rt2x00pci_uninitialize, 2023 .uninitialize = rt2x00pci_uninitialize,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 6b3b1de4679..01f385d5846 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
478 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); 478 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
479 479
480 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); 480 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
481 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
482 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync); 481 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
483 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
484 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); 482 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
485 } 483 }
486 484
@@ -1056,9 +1054,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1056 rt2500usb_disable_radio(rt2x00dev); 1054 rt2500usb_disable_radio(rt2x00dev);
1057 break; 1055 break;
1058 case STATE_RADIO_IRQ_ON: 1056 case STATE_RADIO_IRQ_ON:
1059 case STATE_RADIO_IRQ_ON_ISR:
1060 case STATE_RADIO_IRQ_OFF: 1057 case STATE_RADIO_IRQ_OFF:
1061 case STATE_RADIO_IRQ_OFF_ISR:
1062 /* No support, but no error either */ 1058 /* No support, but no error either */
1063 break; 1059 break;
1064 case STATE_DEEP_SLEEP: 1060 case STATE_DEEP_SLEEP:
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4c55e8525ca..6f4a2432c02 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) 51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5390 2.4G 1T1R
54 */ 55 */
55#define RF2820 0x0001 56#define RF2820 0x0001
56#define RF2850 0x0002 57#define RF2850 0x0002
@@ -65,6 +66,7 @@
65#define RF3320 0x000b 66#define RF3320 0x000b
66#define RF3322 0x000c 67#define RF3322 0x000c
67#define RF3853 0x000d 68#define RF3853 0x000d
69#define RF5390 0x5390
68 70
69/* 71/*
70 * Chipset revisions. 72 * Chipset revisions.
@@ -77,6 +79,7 @@
77#define REV_RT3071E 0x0211 79#define REV_RT3071E 0x0211
78#define REV_RT3090E 0x0211 80#define REV_RT3090E 0x0211
79#define REV_RT3390E 0x0211 81#define REV_RT3390E 0x0211
82#define REV_RT5390F 0x0502
80 83
81/* 84/*
82 * Signal information. 85 * Signal information.
@@ -121,6 +124,13 @@
121#define E2PROM_CSR_RELOAD FIELD32(0x00000080) 124#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
122 125
123/* 126/*
127 * AUX_CTRL: Aux/PCI-E related configuration
128 */
129#define AUX_CTRL 0x10c
130#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
131#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
132
133/*
124 * OPT_14: Unknown register used by rt3xxx devices. 134 * OPT_14: Unknown register used by rt3xxx devices.
125 */ 135 */
126#define OPT_14_CSR 0x0114 136#define OPT_14_CSR 0x0114
@@ -270,6 +280,7 @@
270 280
271/* 281/*
272 * GPIO_CTRL_CFG: 282 * GPIO_CTRL_CFG:
283 * GPIOD: GPIO direction, 0: Output, 1: Input
273 */ 284 */
274#define GPIO_CTRL_CFG 0x0228 285#define GPIO_CTRL_CFG 0x0228
275#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001) 286#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
@@ -280,7 +291,14 @@
280#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020) 291#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
281#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040) 292#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
282#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080) 293#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
283#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100) 294#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100)
295#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200)
296#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400)
297#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800)
298#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000)
299#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000)
300#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000)
301#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000)
284 302
285/* 303/*
286 * MCU_CMD_CFG 304 * MCU_CMD_CFG
@@ -372,8 +390,12 @@
372 390
373/* 391/*
374 * US_CYC_CNT 392 * US_CYC_CNT
393 * BT_MODE_EN: Bluetooth mode enable
394 * CLOCK CYCLE: Clock cycle count in 1us.
395 * PCI:0x21, PCIE:0x7d, USB:0x1e
375 */ 396 */
376#define US_CYC_CNT 0x02a4 397#define US_CYC_CNT 0x02a4
398#define US_CYC_CNT_BT_MODE_EN FIELD32(0x00000100)
377#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff) 399#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff)
378 400
379/* 401/*
@@ -442,7 +464,7 @@
442 */ 464 */
443#define RF_CSR_CFG 0x0500 465#define RF_CSR_CFG 0x0500
444#define RF_CSR_CFG_DATA FIELD32(0x000000ff) 466#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
445#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00) 467#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
446#define RF_CSR_CFG_WRITE FIELD32(0x00010000) 468#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
447#define RF_CSR_CFG_BUSY FIELD32(0x00020000) 469#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
448 470
@@ -1132,8 +1154,8 @@
1132 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd) 1154 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
1133 * PROTECT_CTRL: Protection control frame type for CCK TX 1155 * PROTECT_CTRL: Protection control frame type for CCK TX
1134 * 0:none, 1:RTS/CTS, 2:CTS-to-self 1156 * 0:none, 1:RTS/CTS, 2:CTS-to-self
1135 * PROTECT_NAV: TXOP protection type for CCK TX 1157 * PROTECT_NAV_SHORT: TXOP protection type for CCK TX with short NAV
1136 * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect 1158 * PROTECT_NAV_LONG: TXOP protection type for CCK TX with long NAV
1137 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow 1159 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
1138 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow 1160 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
1139 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow 1161 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
@@ -1145,7 +1167,8 @@
1145#define CCK_PROT_CFG 0x1364 1167#define CCK_PROT_CFG 0x1364
1146#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1168#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1147#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1169#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1148#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1170#define CCK_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1171#define CCK_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1149#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1172#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1150#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1173#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1151#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1174#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1160,7 +1183,8 @@
1160#define OFDM_PROT_CFG 0x1368 1183#define OFDM_PROT_CFG 0x1368
1161#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1184#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1162#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1185#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1163#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1186#define OFDM_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1187#define OFDM_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1164#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1188#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1165#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1189#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1166#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1190#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1175,7 +1199,8 @@
1175#define MM20_PROT_CFG 0x136c 1199#define MM20_PROT_CFG 0x136c
1176#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1200#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1177#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1201#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1178#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1202#define MM20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1203#define MM20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1179#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1204#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1180#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1205#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1181#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1206#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1190,7 +1215,8 @@
1190#define MM40_PROT_CFG 0x1370 1215#define MM40_PROT_CFG 0x1370
1191#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1216#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1192#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1217#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1193#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1218#define MM40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1219#define MM40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1194#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1220#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1195#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1221#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1196#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1222#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1205,7 +1231,8 @@
1205#define GF20_PROT_CFG 0x1374 1231#define GF20_PROT_CFG 0x1374
1206#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1232#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1207#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1233#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1208#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1234#define GF20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1235#define GF20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1209#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1236#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1210#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1237#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1211#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1238#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1220,7 +1247,8 @@
1220#define GF40_PROT_CFG 0x1378 1247#define GF40_PROT_CFG 0x1378
1221#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) 1248#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1222#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) 1249#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1223#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) 1250#define GF40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000)
1251#define GF40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000)
1224#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) 1252#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1225#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) 1253#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1226#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) 1254#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
@@ -1697,11 +1725,14 @@ struct mac_iveiv_entry {
1697 */ 1725 */
1698 1726
1699/* 1727/*
1700 * BBP 1: TX Antenna & Power 1728 * BBP 1: TX Antenna & Power Control
1701 * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm, 1729 * POWER_CTRL:
1702 * 3 - increase tx power by 6dBm 1730 * 0 - normal,
1731 * 1 - drop tx power by 6dBm,
1732 * 2 - drop tx power by 12dBm,
1733 * 3 - increase tx power by 6dBm
1703 */ 1734 */
1704#define BBP1_TX_POWER FIELD8(0x07) 1735#define BBP1_TX_POWER_CTRL FIELD8(0x07)
1705#define BBP1_TX_ANTENNA FIELD8(0x18) 1736#define BBP1_TX_ANTENNA FIELD8(0x18)
1706 1737
1707/* 1738/*
@@ -1715,6 +1746,13 @@ struct mac_iveiv_entry {
1715 */ 1746 */
1716#define BBP4_TX_BF FIELD8(0x01) 1747#define BBP4_TX_BF FIELD8(0x01)
1717#define BBP4_BANDWIDTH FIELD8(0x18) 1748#define BBP4_BANDWIDTH FIELD8(0x18)
1749#define BBP4_MAC_IF_CTRL FIELD8(0x40)
1750
1751/*
1752 * BBP 109
1753 */
1754#define BBP109_TX0_POWER FIELD8(0x0f)
1755#define BBP109_TX1_POWER FIELD8(0xf0)
1718 1756
1719/* 1757/*
1720 * BBP 138: Unknown 1758 * BBP 138: Unknown
@@ -1725,6 +1763,11 @@ struct mac_iveiv_entry {
1725#define BBP138_TX_DAC2 FIELD8(0x40) 1763#define BBP138_TX_DAC2 FIELD8(0x40)
1726 1764
1727/* 1765/*
1766 * BBP 152: Rx Ant
1767 */
1768#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
1769
1770/*
1728 * RFCSR registers 1771 * RFCSR registers
1729 * The wordsize of the RFCSR is 8 bits. 1772 * The wordsize of the RFCSR is 8 bits.
1730 */ 1773 */
@@ -1733,12 +1776,18 @@ struct mac_iveiv_entry {
1733 * RFCSR 1: 1776 * RFCSR 1:
1734 */ 1777 */
1735#define RFCSR1_RF_BLOCK_EN FIELD8(0x01) 1778#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
1779#define RFCSR1_PLL_PD FIELD8(0x02)
1736#define RFCSR1_RX0_PD FIELD8(0x04) 1780#define RFCSR1_RX0_PD FIELD8(0x04)
1737#define RFCSR1_TX0_PD FIELD8(0x08) 1781#define RFCSR1_TX0_PD FIELD8(0x08)
1738#define RFCSR1_RX1_PD FIELD8(0x10) 1782#define RFCSR1_RX1_PD FIELD8(0x10)
1739#define RFCSR1_TX1_PD FIELD8(0x20) 1783#define RFCSR1_TX1_PD FIELD8(0x20)
1740 1784
1741/* 1785/*
1786 * RFCSR 2:
1787 */
1788#define RFCSR2_RESCAL_EN FIELD8(0x80)
1789
1790/*
1742 * RFCSR 6: 1791 * RFCSR 6:
1743 */ 1792 */
1744#define RFCSR6_R1 FIELD8(0x03) 1793#define RFCSR6_R1 FIELD8(0x03)
@@ -1750,6 +1799,11 @@ struct mac_iveiv_entry {
1750#define RFCSR7_RF_TUNING FIELD8(0x01) 1799#define RFCSR7_RF_TUNING FIELD8(0x01)
1751 1800
1752/* 1801/*
1802 * RFCSR 11:
1803 */
1804#define RFCSR11_R FIELD8(0x03)
1805
1806/*
1753 * RFCSR 12: 1807 * RFCSR 12:
1754 */ 1808 */
1755#define RFCSR12_TX_POWER FIELD8(0x1f) 1809#define RFCSR12_TX_POWER FIELD8(0x1f)
@@ -1770,6 +1824,7 @@ struct mac_iveiv_entry {
1770#define RFCSR17_TXMIXER_GAIN FIELD8(0x07) 1824#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
1771#define RFCSR17_TX_LO1_EN FIELD8(0x08) 1825#define RFCSR17_TX_LO1_EN FIELD8(0x08)
1772#define RFCSR17_R FIELD8(0x20) 1826#define RFCSR17_R FIELD8(0x20)
1827#define RFCSR17_CODE FIELD8(0x7f)
1773 1828
1774/* 1829/*
1775 * RFCSR 20: 1830 * RFCSR 20:
@@ -1802,9 +1857,33 @@ struct mac_iveiv_entry {
1802/* 1857/*
1803 * RFCSR 30: 1858 * RFCSR 30:
1804 */ 1859 */
1860#define RFCSR30_TX_H20M FIELD8(0x02)
1861#define RFCSR30_RX_H20M FIELD8(0x04)
1862#define RFCSR30_RX_VCM FIELD8(0x18)
1805#define RFCSR30_RF_CALIBRATION FIELD8(0x80) 1863#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
1806 1864
1807/* 1865/*
1866 * RFCSR 31:
1867 */
1868#define RFCSR31_RX_AGC_FC FIELD8(0x1f)
1869#define RFCSR31_RX_H20M FIELD8(0x20)
1870
1871/*
1872 * RFCSR 38:
1873 */
1874#define RFCSR38_RX_LO1_EN FIELD8(0x20)
1875
1876/*
1877 * RFCSR 39:
1878 */
1879#define RFCSR39_RX_LO2_EN FIELD8(0x80)
1880
1881/*
1882 * RFCSR 49:
1883 */
1884#define RFCSR49_TX FIELD8(0x3f)
1885
1886/*
1808 * RF registers 1887 * RF registers
1809 */ 1888 */
1810 1889
@@ -1837,6 +1916,11 @@ struct mac_iveiv_entry {
1837 */ 1916 */
1838 1917
1839/* 1918/*
1919 * Chip ID
1920 */
1921#define EEPROM_CHIP_ID 0x0000
1922
1923/*
1840 * EEPROM Version 1924 * EEPROM Version
1841 */ 1925 */
1842#define EEPROM_VERSION 0x0001 1926#define EEPROM_VERSION 0x0001
@@ -1989,23 +2073,26 @@ struct mac_iveiv_entry {
1989#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) 2073#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1990 2074
1991/* 2075/*
1992 * EEPROM Maximum TX power values 2076 * EEPROM EIRP Maximum TX power values (unit: dBm)
1993 */ 2077 */
1994#define EEPROM_MAX_TX_POWER 0x0027 2078#define EEPROM_EIRP_MAX_TX_POWER 0x0027
1995#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff) 2079#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff)
1996#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00) 2080#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00)
1997 2081
1998/* 2082/*
1999 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. 2083 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
2000 * This is delta in 40MHZ. 2084 * This is delta in 40MHZ.
2001 * VALUE: Tx Power delta value (MAX=4) 2085 * VALUE: Tx Power delta value, MAX=4 (unit: dBm)
2002 * TYPE: 1: Plus the delta value, 0: minus the delta value 2086 * TYPE: 1: Plus the delta value, 0: minus the delta value
2003 * TXPOWER: Enable: 2087 * ENABLE: enable tx power compensation for 40MHz BW
2004 */ 2088 */
2005#define EEPROM_TXPOWER_DELTA 0x0028 2089#define EEPROM_TXPOWER_DELTA 0x0028
2006#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f) 2090#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f)
2007#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040) 2091#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040)
2008#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080) 2092#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
2093#define EEPROM_TXPOWER_DELTA_VALUE_5G FIELD16(0x3f00)
2094#define EEPROM_TXPOWER_DELTA_TYPE_5G FIELD16(0x4000)
2095#define EEPROM_TXPOWER_DELTA_ENABLE_5G FIELD16(0x8000)
2009 2096
2010/* 2097/*
2011 * EEPROM TXPOWER 802.11BG 2098 * EEPROM TXPOWER 802.11BG
@@ -2058,6 +2145,7 @@ struct mac_iveiv_entry {
2058#define MCU_LED_LED_POLARITY 0x54 2145#define MCU_LED_LED_POLARITY 0x54
2059#define MCU_RADAR 0x60 2146#define MCU_RADAR 0x60
2060#define MCU_BOOT_SIGNAL 0x72 2147#define MCU_BOOT_SIGNAL 0x72
2148#define MCU_ANT_SELECT 0x73
2061#define MCU_BBP_SIGNAL 0x80 2149#define MCU_BBP_SIGNAL 0x80
2062#define MCU_POWER_SAVE 0x83 2150#define MCU_POWER_SAVE 0x83
2063 2151
@@ -2202,4 +2290,9 @@ struct mac_iveiv_entry {
2202#define TXPOWER_A_TO_DEV(__txpower) \ 2290#define TXPOWER_A_TO_DEV(__txpower) \
2203 clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER) 2291 clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
2204 2292
2293/*
2294 * Board's maximum TX power limitation
2295 */
2296#define EIRP_MAX_TX_POWER_LIMIT 0x50
2297
2205#endif /* RT2800_H */ 2298#endif /* RT2800_H */
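
EEPROM_TXPOWER_DELTA now packs independent 2.4 GHz and 5 GHz descriptors (value, type and enable bit per band) into one 16-bit word. A short user-space sketch of unpacking those fields with the masks defined above, which is essentially what the FIELD16() accessors do (get_field16 below is a plain stand-in, and the EEPROM word is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Field masks from rt2800.h (EEPROM_TXPOWER_DELTA_*). */
    #define DELTA_VALUE_2G  0x003f
    #define DELTA_TYPE_2G   0x0040
    #define DELTA_ENABLE_2G 0x0080
    #define DELTA_VALUE_5G  0x3f00
    #define DELTA_TYPE_5G   0x4000
    #define DELTA_ENABLE_5G 0x8000

    /* Extract a bit field: mask, then shift down to the mask's lowest set bit. */
    static unsigned int get_field16(uint16_t word, uint16_t mask)
    {
            return (word & mask) >> __builtin_ctz(mask);
    }

    int main(void)
    {
            uint16_t word = 0xc1c2;   /* invented EEPROM content, for illustration only */

            printf("2G: enable=%u type=%u value=%u\n",
                   get_field16(word, DELTA_ENABLE_2G),
                   get_field16(word, DELTA_TYPE_2G),
                   get_field16(word, DELTA_VALUE_2G));
            printf("5G: enable=%u type=%u value=%u\n",
                   get_field16(word, DELTA_ENABLE_5G),
                   get_field16(word, DELTA_TYPE_5G),
                   get_field16(word, DELTA_VALUE_5G));
            return 0;
    }
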
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 54917a28139..3da78bf0ca2 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -400,8 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
400 if (rt2800_wait_csr_ready(rt2x00dev)) 400 if (rt2800_wait_csr_ready(rt2x00dev))
401 return -EBUSY; 401 return -EBUSY;
402 402
403 if (rt2x00_is_pci(rt2x00dev)) 403 if (rt2x00_is_pci(rt2x00dev)) {
404 if (rt2x00_rt(rt2x00dev, RT5390)) {
405 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
406 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
407 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
408 rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
409 }
404 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); 410 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
411 }
405 412
406 /* 413 /*
407 * Disable DMA, will be reenabled later when enabling 414 * Disable DMA, will be reenabled later when enabling
@@ -773,13 +780,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
773 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 780 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
774 unsigned int beacon_base; 781 unsigned int beacon_base;
775 unsigned int padding_len; 782 unsigned int padding_len;
776 u32 reg; 783 u32 orig_reg, reg;
777 784
778 /* 785 /*
779 * Disable beaconing while we are reloading the beacon data, 786 * Disable beaconing while we are reloading the beacon data,
780 * otherwise we might be sending out invalid data. 787 * otherwise we might be sending out invalid data.
781 */ 788 */
782 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 789 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
790 orig_reg = reg;
783 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 791 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
784 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 792 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
785 793
@@ -810,7 +818,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
810 * Write entire beacon with TXWI and padding to register. 818 * Write entire beacon with TXWI and padding to register.
811 */ 819 */
812 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 820 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
813 skb_pad(entry->skb, padding_len); 821 if (padding_len && skb_pad(entry->skb, padding_len)) {
822 ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
823 /* skb freed by skb_pad() on failure */
824 entry->skb = NULL;
825 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg);
826 return;
827 }
828
814 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 829 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
815 rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, 830 rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
816 entry->skb->len + padding_len); 831 entry->skb->len + padding_len);
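
The error path added above only pads when padding is actually needed and, if skb_pad() fails, restores the BCN_TIME_CFG value saved in orig_reg before bailing out. The padding length itself is the usual round-up-to-4-bytes computation; a one-liner worked through in plain arithmetic:

    #include <stdio.h>

    /* Bytes needed to pad len up to a 4-byte boundary,
     * i.e. roundup(len, 4) - len as used in rt2800_write_beacon(). */
    static unsigned int pad4(unsigned int len)
    {
            return ((len + 3) & ~3u) - len;
    }

    int main(void)
    {
            unsigned int len;

            for (len = 60; len <= 64; len++)
                    printf("len=%u pad=%u\n", len, pad4(len));   /* 0, 3, 2, 1, 0 */
            return 0;
    }
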
@@ -818,8 +833,6 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
818 /* 833 /*
819 * Enable beaconing again. 834 * Enable beaconing again.
820 */ 835 */
821 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
822 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
823 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 836 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
824 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 837 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
825 838
@@ -831,8 +844,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
831} 844}
832EXPORT_SYMBOL_GPL(rt2800_write_beacon); 845EXPORT_SYMBOL_GPL(rt2800_write_beacon);
833 846
834static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev, 847static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
835 unsigned int beacon_base) 848 unsigned int beacon_base)
836{ 849{
837 int i; 850 int i;
838 851
@@ -845,6 +858,33 @@ static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
845 rt2800_register_write(rt2x00dev, beacon_base + i, 0); 858 rt2800_register_write(rt2x00dev, beacon_base + i, 0);
846} 859}
847 860
861void rt2800_clear_beacon(struct queue_entry *entry)
862{
863 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
864 u32 reg;
865
866 /*
867 * Disable beaconing while we are reloading the beacon data,
868 * otherwise we might be sending out invalid data.
869 */
870 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
871 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
872 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
873
874 /*
875 * Clear beacon.
876 */
877 rt2800_clear_beacon_register(rt2x00dev,
878 HW_BEACON_OFFSET(entry->entry_idx));
879
880 /*
881 * Enable beaconing again.
882 */
883 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
884 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
885}
886EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
887
848#ifdef CONFIG_RT2X00_LIB_DEBUGFS 888#ifdef CONFIG_RT2X00_LIB_DEBUGFS
849const struct rt2x00debug rt2800_rt2x00debug = { 889const struct rt2x00debug rt2800_rt2x00debug = {
850 .owner = THIS_MODULE, 890 .owner = THIS_MODULE,
@@ -1005,7 +1045,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
1005 1045
1006 memset(&wcid_entry, 0, sizeof(wcid_entry)); 1046 memset(&wcid_entry, 0, sizeof(wcid_entry));
1007 if (crypto->cmd == SET_KEY) 1047 if (crypto->cmd == SET_KEY)
1008 memcpy(&wcid_entry, crypto->address, ETH_ALEN); 1048 memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
1009 rt2800_register_multiwrite(rt2x00dev, offset, 1049 rt2800_register_multiwrite(rt2x00dev, offset,
1010 &wcid_entry, sizeof(wcid_entry)); 1050 &wcid_entry, sizeof(wcid_entry));
1011} 1051}
@@ -1155,29 +1195,11 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1155 1195
1156 if (flags & CONFIG_UPDATE_TYPE) { 1196 if (flags & CONFIG_UPDATE_TYPE) {
1157 /* 1197 /*
1158 * Clear current synchronisation setup.
1159 */
1160 rt2800_clear_beacon(rt2x00dev,
1161 HW_BEACON_OFFSET(intf->beacon->entry_idx));
1162 /*
1163 * Enable synchronisation. 1198 * Enable synchronisation.
1164 */ 1199 */
1165 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 1200 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1166 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
1167 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync); 1201 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
1168 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
1169 (conf->sync == TSF_SYNC_ADHOC ||
1170 conf->sync == TSF_SYNC_AP_NONE));
1171 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1202 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1172
1173 /*
1174 * Enable pre tbtt interrupt for beaconing modes
1175 */
1176 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
1177 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
1178 (conf->sync == TSF_SYNC_AP_NONE));
1179 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
1180
1181 } 1203 }
1182 1204
1183 if (flags & CONFIG_UPDATE_MAC) { 1205 if (flags & CONFIG_UPDATE_MAC) {
@@ -1361,10 +1383,32 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
1361} 1383}
1362EXPORT_SYMBOL_GPL(rt2800_config_erp); 1384EXPORT_SYMBOL_GPL(rt2800_config_erp);
1363 1385
1386static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
1387 enum antenna ant)
1388{
1389 u32 reg;
1390 u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0;
1391 u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1;
1392
1393 if (rt2x00_is_pci(rt2x00dev)) {
1394 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
1395 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin);
1396 rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
1397 } else if (rt2x00_is_usb(rt2x00dev))
1398 rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff,
1399 eesk_pin, 0);
1400
1401 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
1402 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
1403 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, gpio_bit3);
1404 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
1405}
1406
1364void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) 1407void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1365{ 1408{
1366 u8 r1; 1409 u8 r1;
1367 u8 r3; 1410 u8 r3;
1411 u16 eeprom;
1368 1412
1369 rt2800_bbp_read(rt2x00dev, 1, &r1); 1413 rt2800_bbp_read(rt2x00dev, 1, &r1);
1370 rt2800_bbp_read(rt2x00dev, 3, &r3); 1414 rt2800_bbp_read(rt2x00dev, 3, &r3);
@@ -1372,7 +1416,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1372 /* 1416 /*
1373 * Configure the TX antenna. 1417 * Configure the TX antenna.
1374 */ 1418 */
1375 switch ((int)ant->tx) { 1419 switch (ant->tx_chain_num) {
1376 case 1: 1420 case 1:
1377 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); 1421 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
1378 break; 1422 break;
@@ -1387,8 +1431,18 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
1387 /* 1431 /*
1388 * Configure the RX antenna. 1432 * Configure the RX antenna.
1389 */ 1433 */
1390 switch ((int)ant->rx) { 1434 switch (ant->rx_chain_num) {
1391 case 1: 1435 case 1:
1436 if (rt2x00_rt(rt2x00dev, RT3070) ||
1437 rt2x00_rt(rt2x00dev, RT3090) ||
1438 rt2x00_rt(rt2x00dev, RT3390)) {
1439 rt2x00_eeprom_read(rt2x00dev,
1440 EEPROM_NIC_CONF1, &eeprom);
1441 if (rt2x00_get_field16(eeprom,
1442 EEPROM_NIC_CONF1_ANT_DIVERSITY))
1443 rt2800_set_ant_diversity(rt2x00dev,
1444 rt2x00dev->default_ant.rx);
1445 }
1392 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); 1446 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
1393 break; 1447 break;
1394 case 2: 1448 case 2:
@@ -1434,13 +1488,13 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
1434{ 1488{
1435 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 1489 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
1436 1490
1437 if (rt2x00dev->default_ant.tx == 1) 1491 if (rt2x00dev->default_ant.tx_chain_num == 1)
1438 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1); 1492 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);
1439 1493
1440 if (rt2x00dev->default_ant.rx == 1) { 1494 if (rt2x00dev->default_ant.rx_chain_num == 1) {
1441 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1); 1495 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
1442 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); 1496 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
1443 } else if (rt2x00dev->default_ant.rx == 2) 1497 } else if (rt2x00dev->default_ant.rx_chain_num == 2)
1444 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); 1498 rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
1445 1499
1446 if (rf->channel > 14) { 1500 if (rf->channel > 14) {
@@ -1526,6 +1580,99 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
1526 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); 1580 rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
1527} 1581}
1528 1582
1583
1584#define RT5390_POWER_BOUND 0x27
1585#define RT5390_FREQ_OFFSET_BOUND 0x5f
1586
1587static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
1588 struct ieee80211_conf *conf,
1589 struct rf_channel *rf,
1590 struct channel_info *info)
1591{
1592 u8 rfcsr;
1593 u16 eeprom;
1594
1595 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
1596 rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
1597 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
1598 rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
1599 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
1600
1601 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
1602 if (info->default_power1 > RT5390_POWER_BOUND)
1603 rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
1604 else
1605 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
1606 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
1607
1608 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
1609 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
1610 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
1611 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
1612 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
1613 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
1614
1615 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1616 if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
1617 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, RT5390_FREQ_OFFSET_BOUND);
1618 else
1619 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
1620 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
1621
1622 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
1623 if (rf->channel <= 14) {
1624 int idx = rf->channel-1;
1625
1626 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
1627 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
1628 /* r55/r59 value array of channel 1~14 */
1629 static const char r55_bt_rev[] = {0x83, 0x83,
1630 0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
1631 0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
1632 static const char r59_bt_rev[] = {0x0e, 0x0e,
1633 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
1634 0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
1635
1636 rt2800_rfcsr_write(rt2x00dev, 55, r55_bt_rev[idx]);
1637 rt2800_rfcsr_write(rt2x00dev, 59, r59_bt_rev[idx]);
1638 } else {
1639 static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
1640 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
1641 0x88, 0x88, 0x86, 0x85, 0x84};
1642
1643 rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
1644 }
1645 } else {
1646 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
1647 static const char r55_nonbt_rev[] = {0x23, 0x23,
1648 0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
1649 0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
1650 static const char r59_nonbt_rev[] = {0x07, 0x07,
1651 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
1652 0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
1653
1654 rt2800_rfcsr_write(rt2x00dev, 55, r55_nonbt_rev[idx]);
1655 rt2800_rfcsr_write(rt2x00dev, 59, r59_nonbt_rev[idx]);
1656 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
1657 static const char r59_non_bt[] = {0x8f, 0x8f,
1658 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
1659 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
1660
1661 rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]);
1662 }
1663 }
1664 }
1665
1666 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
1667 rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
1668 rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
1669 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
1670
1671 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
1672 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
1673 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
1674}
1675
1529static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 1676static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1530 struct ieee80211_conf *conf, 1677 struct ieee80211_conf *conf,
1531 struct rf_channel *rf, 1678 struct rf_channel *rf,
@@ -1550,6 +1697,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1550 rt2x00_rf(rt2x00dev, RF3052) || 1697 rt2x00_rf(rt2x00dev, RF3052) ||
1551 rt2x00_rf(rt2x00dev, RF3320)) 1698 rt2x00_rf(rt2x00dev, RF3320))
1552 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1699 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
1700 else if (rt2x00_rf(rt2x00dev, RF5390))
1701 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
1553 else 1702 else
1554 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1703 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
1555 1704
@@ -1562,12 +1711,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1562 rt2800_bbp_write(rt2x00dev, 86, 0); 1711 rt2800_bbp_write(rt2x00dev, 86, 0);
1563 1712
1564 if (rf->channel <= 14) { 1713 if (rf->channel <= 14) {
1565 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { 1714 if (!rt2x00_rt(rt2x00dev, RT5390)) {
1566 rt2800_bbp_write(rt2x00dev, 82, 0x62); 1715 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
1567 rt2800_bbp_write(rt2x00dev, 75, 0x46); 1716 rt2800_bbp_write(rt2x00dev, 82, 0x62);
1568 } else { 1717 rt2800_bbp_write(rt2x00dev, 75, 0x46);
1569 rt2800_bbp_write(rt2x00dev, 82, 0x84); 1718 } else {
1570 rt2800_bbp_write(rt2x00dev, 75, 0x50); 1719 rt2800_bbp_write(rt2x00dev, 82, 0x84);
1720 rt2800_bbp_write(rt2x00dev, 75, 0x50);
1721 }
1571 } 1722 }
1572 } else { 1723 } else {
1573 rt2800_bbp_write(rt2x00dev, 82, 0xf2); 1724 rt2800_bbp_write(rt2x00dev, 82, 0xf2);
@@ -1587,13 +1738,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1587 tx_pin = 0; 1738 tx_pin = 0;
1588 1739
1589 /* Turn on unused PA or LNA when not using 1T or 1R */ 1740 /* Turn on unused PA or LNA when not using 1T or 1R */
1590 if (rt2x00dev->default_ant.tx != 1) { 1741 if (rt2x00dev->default_ant.tx_chain_num == 2) {
1591 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1); 1742 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
1592 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1); 1743 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
1593 } 1744 }
1594 1745
1595 /* Turn on unused PA or LNA when not using 1T or 1R */ 1746 /* Turn on unused PA or LNA when not using 1T or 1R */
1596 if (rt2x00dev->default_ant.rx != 1) { 1747 if (rt2x00dev->default_ant.rx_chain_num == 2) {
1597 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); 1748 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
1598 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); 1749 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
1599 } 1750 }
@@ -1637,30 +1788,116 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1637 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg); 1788 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
1638} 1789}
1639 1790
1791static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
1792 enum ieee80211_band band)
1793{
1794 u16 eeprom;
1795 u8 comp_en;
1796 u8 comp_type;
1797 int comp_value;
1798
1799 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
1800
1801 if (eeprom == 0xffff)
1802 return 0;
1803
1804 if (band == IEEE80211_BAND_2GHZ) {
1805 comp_en = rt2x00_get_field16(eeprom,
1806 EEPROM_TXPOWER_DELTA_ENABLE_2G);
1807 if (comp_en) {
1808 comp_type = rt2x00_get_field16(eeprom,
1809 EEPROM_TXPOWER_DELTA_TYPE_2G);
1810 comp_value = rt2x00_get_field16(eeprom,
1811 EEPROM_TXPOWER_DELTA_VALUE_2G);
1812 if (!comp_type)
1813 comp_value = -comp_value;
1814 }
1815 } else {
1816 comp_en = rt2x00_get_field16(eeprom,
1817 EEPROM_TXPOWER_DELTA_ENABLE_5G);
1818 if (comp_en) {
1819 comp_type = rt2x00_get_field16(eeprom,
1820 EEPROM_TXPOWER_DELTA_TYPE_5G);
1821 comp_value = rt2x00_get_field16(eeprom,
1822 EEPROM_TXPOWER_DELTA_VALUE_5G);
1823 if (!comp_type)
1824 comp_value = -comp_value;
1825 }
1826 }
1827
1828 return comp_value;
1829}
1830
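The helper above decodes EEPROM_TXPOWER_DELTA into a signed bandwidth compensation: each band has an enable bit, a type bit and a magnitude, and a clear type bit means the magnitude is applied as a reduction. A minimal sketch of that decoding with made-up field values, not a real EEPROM read:

/* Sketch only: mirrors the sign handling in rt2800_get_txpower_bw_comp().
 * The field values below are illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int enable = 1;	/* e.g. EEPROM_TXPOWER_DELTA_ENABLE_2G */
	unsigned int type   = 0;	/* clear: subtract, set: add           */
	unsigned int value  = 2;	/* delta magnitude                     */
	int comp_value = 0;

	if (enable) {
		comp_value = value;
		if (!type)		/* type bit clear -> negative delta */
			comp_value = -comp_value;
	}

	printf("bandwidth compensation: %d\n", comp_value);	/* -2 */
	return 0;
}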
1831static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev,
1832 int is_rate_b,
1833 enum ieee80211_band band,
1834 int power_level,
1835 u8 txpower)
1836{
1837 u32 reg;
1838 u16 eeprom;
1839 u8 criterion;
1840 u8 eirp_txpower;
1841 u8 eirp_txpower_criterion;
1842 u8 reg_limit;
1843 int bw_comp = 0;
1844
1845 if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
1846 return txpower;
1847
1848 if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1849 bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band);
1850
1851 if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) {
1852 /*
1853 * Check if eirp txpower exceeds txpower_limit.
1854 * We use OFDM 6M as the criterion and its eirp txpower
1855 * is stored at EEPROM_EIRP_MAX_TX_POWER.
1856 * .11b data rates need an additional 4dBm
1857 * when calculating eirp txpower.
1858 */
1859 rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
1860 criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS);
1861
1862 rt2x00_eeprom_read(rt2x00dev,
1863 EEPROM_EIRP_MAX_TX_POWER, &eeprom);
1864
1865 if (band == IEEE80211_BAND_2GHZ)
1866 eirp_txpower_criterion = rt2x00_get_field16(eeprom,
1867 EEPROM_EIRP_MAX_TX_POWER_2GHZ);
1868 else
1869 eirp_txpower_criterion = rt2x00_get_field16(eeprom,
1870 EEPROM_EIRP_MAX_TX_POWER_5GHZ);
1871
1872 eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
1873 (is_rate_b ? 4 : 0) + bw_comp;
1874
1875 reg_limit = (eirp_txpower > power_level) ?
1876 (eirp_txpower - power_level) : 0;
1877 } else
1878 reg_limit = 0;
1879
1880 return txpower + bw_comp - reg_limit;
1881}
1882
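Taken together, the compensation adds the bandwidth delta to the per-rate EEPROM value and then clamps it so the resulting EIRP (derived from the OFDM 6M criterion in TX_PWR_CFG_0 and EEPROM_EIRP_MAX_TX_POWER, plus 4dBm for .11b rates) does not exceed power_level. A short worked example with hypothetical numbers, not values from any real board:

/* Sketch only: reproduces the EIRP clamp arithmetic above with
 * made-up register/EEPROM values. */
#include <stdio.h>

int main(void)
{
	int txpower = 10;			/* per-rate value from EEPROM     */
	int criterion = 8;			/* OFDM 6M field of TX_PWR_CFG_0  */
	int eirp_txpower_criterion = 20;	/* EEPROM_EIRP_MAX_TX_POWER value */
	int power_level = 17;			/* configured power limit         */
	int is_rate_b = 1;
	int bw_comp = -2;			/* from the delta sketch above    */

	int eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
			   (is_rate_b ? 4 : 0) + bw_comp;
	int reg_limit = (eirp_txpower > power_level) ?
			(eirp_txpower - power_level) : 0;

	/* eirp_txpower = 24, reg_limit = 7, programmed value = 1 */
	printf("programmed txpower = %d\n", txpower + bw_comp - reg_limit);
	return 0;
}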
1640static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, 1883static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1641 const int max_txpower) 1884 struct ieee80211_conf *conf)
1642{ 1885{
1643 u8 txpower; 1886 u8 txpower;
1644 u8 max_value = (u8)max_txpower;
1645 u16 eeprom; 1887 u16 eeprom;
1646 int i; 1888 int i, is_rate_b;
1647 u32 reg; 1889 u32 reg;
1648 u8 r1; 1890 u8 r1;
1649 u32 offset; 1891 u32 offset;
1892 enum ieee80211_band band = conf->channel->band;
1893 int power_level = conf->power_level;
1650 1894
1651 /* 1895 /*
1652 * set to normal tx power mode: +/- 0dBm 1896 * set to normal bbp tx power control mode: +/- 0dBm
1653 */ 1897 */
1654 rt2800_bbp_read(rt2x00dev, 1, &r1); 1898 rt2800_bbp_read(rt2x00dev, 1, &r1);
1655 rt2x00_set_field8(&r1, BBP1_TX_POWER, 0); 1899 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0);
1656 rt2800_bbp_write(rt2x00dev, 1, r1); 1900 rt2800_bbp_write(rt2x00dev, 1, r1);
1657
1658 /*
1659 * The eeprom contains the tx power values for each rate. These
1660 * values map to 100% tx power. Each 16bit word contains four tx
1661 * power values and the order is the same as used in the TX_PWR_CFG
1662 * registers.
1663 */
1664 offset = TX_PWR_CFG_0; 1901 offset = TX_PWR_CFG_0;
1665 1902
1666 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { 1903 for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
@@ -1674,73 +1911,99 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
1674 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, 1911 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
1675 &eeprom); 1912 &eeprom);
1676 1913
1677 /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS, 1914 is_rate_b = i ? 0 : 1;
1915 /*
1916 * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS,
1678 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12, 1917 * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12,
1679 * TX_PWR_CFG_4: unknown */ 1918 * TX_PWR_CFG_4: unknown
1919 */
1680 txpower = rt2x00_get_field16(eeprom, 1920 txpower = rt2x00_get_field16(eeprom,
1681 EEPROM_TXPOWER_BYRATE_RATE0); 1921 EEPROM_TXPOWER_BYRATE_RATE0);
1682 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, 1922 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1683 min(txpower, max_value)); 1923 power_level, txpower);
1924 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower);
1684 1925
1685 /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS, 1926 /*
1927 * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS,
1686 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13, 1928 * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13,
1687 * TX_PWR_CFG_4: unknown */ 1929 * TX_PWR_CFG_4: unknown
1930 */
1688 txpower = rt2x00_get_field16(eeprom, 1931 txpower = rt2x00_get_field16(eeprom,
1689 EEPROM_TXPOWER_BYRATE_RATE1); 1932 EEPROM_TXPOWER_BYRATE_RATE1);
1690 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, 1933 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1691 min(txpower, max_value)); 1934 power_level, txpower);
1935 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower);
1692 1936
1693 /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS, 1937 /*
1938 * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS,
1694 * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14, 1939 * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14,
1695 * TX_PWR_CFG_4: unknown */ 1940 * TX_PWR_CFG_4: unknown
1941 */
1696 txpower = rt2x00_get_field16(eeprom, 1942 txpower = rt2x00_get_field16(eeprom,
1697 EEPROM_TXPOWER_BYRATE_RATE2); 1943 EEPROM_TXPOWER_BYRATE_RATE2);
1698 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, 1944 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1699 min(txpower, max_value)); 1945 power_level, txpower);
1946 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower);
1700 1947
1701 /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS, 1948 /*
1949 * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS,
1702 * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15, 1950 * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15,
1703 * TX_PWR_CFG_4: unknown */ 1951 * TX_PWR_CFG_4: unknown
1952 */
1704 txpower = rt2x00_get_field16(eeprom, 1953 txpower = rt2x00_get_field16(eeprom,
1705 EEPROM_TXPOWER_BYRATE_RATE3); 1954 EEPROM_TXPOWER_BYRATE_RATE3);
1706 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, 1955 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1707 min(txpower, max_value)); 1956 power_level, txpower);
1957 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
1708 1958
1709 /* read the next four txpower values */ 1959 /* read the next four txpower values */
1710 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, 1960 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
1711 &eeprom); 1961 &eeprom);
1712 1962
1713 /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0, 1963 is_rate_b = 0;
1964 /*
1965 * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0,
1714 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown, 1966 * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown,
1715 * TX_PWR_CFG_4: unknown */ 1967 * TX_PWR_CFG_4: unknown
1968 */
1716 txpower = rt2x00_get_field16(eeprom, 1969 txpower = rt2x00_get_field16(eeprom,
1717 EEPROM_TXPOWER_BYRATE_RATE0); 1970 EEPROM_TXPOWER_BYRATE_RATE0);
1718 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, 1971 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1719 min(txpower, max_value)); 1972 power_level, txpower);
1973 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower);
1720 1974
1721 /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1, 1975 /*
1976 * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1,
1722 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown, 1977 * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown,
1723 * TX_PWR_CFG_4: unknown */ 1978 * TX_PWR_CFG_4: unknown
1979 */
1724 txpower = rt2x00_get_field16(eeprom, 1980 txpower = rt2x00_get_field16(eeprom,
1725 EEPROM_TXPOWER_BYRATE_RATE1); 1981 EEPROM_TXPOWER_BYRATE_RATE1);
1726 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, 1982 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1727 min(txpower, max_value)); 1983 power_level, txpower);
1984 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower);
1728 1985
1729 /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2, 1986 /*
1987 * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2,
1730 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown, 1988 * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown,
1731 * TX_PWR_CFG_4: unknown */ 1989 * TX_PWR_CFG_4: unknown
1990 */
1732 txpower = rt2x00_get_field16(eeprom, 1991 txpower = rt2x00_get_field16(eeprom,
1733 EEPROM_TXPOWER_BYRATE_RATE2); 1992 EEPROM_TXPOWER_BYRATE_RATE2);
1734 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, 1993 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1735 min(txpower, max_value)); 1994 power_level, txpower);
1995 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower);
1736 1996
1737 /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3, 1997 /*
1998 * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3,
1738 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown, 1999 * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown,
1739 * TX_PWR_CFG_4: unknown */ 2000 * TX_PWR_CFG_4: unknown
2001 */
1740 txpower = rt2x00_get_field16(eeprom, 2002 txpower = rt2x00_get_field16(eeprom,
1741 EEPROM_TXPOWER_BYRATE_RATE3); 2003 EEPROM_TXPOWER_BYRATE_RATE3);
1742 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, 2004 txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band,
1743 min(txpower, max_value)); 2005 power_level, txpower);
2006 rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower);
1744 2007
1745 rt2800_register_write(rt2x00dev, offset, reg); 2008 rt2800_register_write(rt2x00dev, offset, reg);
1746 2009
@@ -1799,11 +2062,13 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
1799 /* Always recalculate LNA gain before changing configuration */ 2062 /* Always recalculate LNA gain before changing configuration */
1800 rt2800_config_lna_gain(rt2x00dev, libconf); 2063 rt2800_config_lna_gain(rt2x00dev, libconf);
1801 2064
1802 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) 2065 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
1803 rt2800_config_channel(rt2x00dev, libconf->conf, 2066 rt2800_config_channel(rt2x00dev, libconf->conf,
1804 &libconf->rf, &libconf->channel); 2067 &libconf->rf, &libconf->channel);
2068 rt2800_config_txpower(rt2x00dev, libconf->conf);
2069 }
1805 if (flags & IEEE80211_CONF_CHANGE_POWER) 2070 if (flags & IEEE80211_CONF_CHANGE_POWER)
1806 rt2800_config_txpower(rt2x00dev, libconf->conf->power_level); 2071 rt2800_config_txpower(rt2x00dev, libconf->conf);
1807 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 2072 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
1808 rt2800_config_retry_limit(rt2x00dev, libconf); 2073 rt2800_config_retry_limit(rt2x00dev, libconf);
1809 if (flags & IEEE80211_CONF_CHANGE_PS) 2074 if (flags & IEEE80211_CONF_CHANGE_PS)
@@ -1832,7 +2097,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1832 if (rt2x00_rt(rt2x00dev, RT3070) || 2097 if (rt2x00_rt(rt2x00dev, RT3070) ||
1833 rt2x00_rt(rt2x00dev, RT3071) || 2098 rt2x00_rt(rt2x00dev, RT3071) ||
1834 rt2x00_rt(rt2x00dev, RT3090) || 2099 rt2x00_rt(rt2x00dev, RT3090) ||
1835 rt2x00_rt(rt2x00dev, RT3390)) 2100 rt2x00_rt(rt2x00dev, RT3390) ||
2101 rt2x00_rt(rt2x00dev, RT5390))
1836 return 0x1c + (2 * rt2x00dev->lna_gain); 2102 return 0x1c + (2 * rt2x00dev->lna_gain);
1837 else 2103 else
1838 return 0x2e + rt2x00dev->lna_gain; 2104 return 0x2e + rt2x00dev->lna_gain;
@@ -1964,6 +2230,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1964 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 2230 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1965 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 2231 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1966 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f); 2232 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
2233 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
2234 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
2235 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
2236 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1967 } else { 2237 } else {
1968 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); 2238 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
1969 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 2239 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2032,7 +2302,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2032 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg); 2302 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
2033 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3); 2303 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
2034 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0); 2304 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
2035 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1); 2305 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1);
2036 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2306 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2037 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2307 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2038 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2308 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2045,7 +2315,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2045 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg); 2315 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
2046 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3); 2316 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
2047 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0); 2317 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
2048 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1); 2318 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1);
2049 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2319 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2050 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2320 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2051 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2321 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2058,7 +2328,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2058 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg); 2328 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
2059 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004); 2329 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
2060 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0); 2330 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
2061 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1); 2331 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1);
2062 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2332 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2063 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2333 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2064 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2334 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2071,7 +2341,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2071 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg); 2341 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
2072 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084); 2342 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
2073 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0); 2343 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
2074 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1); 2344 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1);
2075 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2345 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2076 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2346 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2077 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2347 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2084,7 +2354,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2084 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg); 2354 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
2085 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004); 2355 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
2086 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0); 2356 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
2087 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1); 2357 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1);
2088 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2358 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2089 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2359 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2090 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2360 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2097,7 +2367,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2097 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg); 2367 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
2098 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084); 2368 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
2099 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0); 2369 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
2100 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1); 2370 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1);
2101 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2371 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
2102 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2372 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
2103 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1); 2373 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
@@ -2187,19 +2457,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2187 /* 2457 /*
2188 * Clear all beacons 2458 * Clear all beacons
2189 */ 2459 */
2190 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0); 2460 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
2191 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1); 2461 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
2192 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2); 2462 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
2193 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3); 2463 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
2194 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4); 2464 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
2195 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5); 2465 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
2196 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6); 2466 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
2197 rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7); 2467 rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
2198 2468
2199 if (rt2x00_is_usb(rt2x00dev)) { 2469 if (rt2x00_is_usb(rt2x00dev)) {
2200 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg); 2470 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
2201 rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30); 2471 rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
2202 rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); 2472 rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
2473 } else if (rt2x00_is_pcie(rt2x00dev)) {
2474 rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
2475 rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
2476 rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
2203 } 2477 }
2204 2478
2205 rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg); 2479 rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -2335,15 +2609,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2335 rt2800_wait_bbp_ready(rt2x00dev))) 2609 rt2800_wait_bbp_ready(rt2x00dev)))
2336 return -EACCES; 2610 return -EACCES;
2337 2611
2338 if (rt2800_is_305x_soc(rt2x00dev)) 2612 if (rt2x00_rt(rt2x00dev, RT5390)) {
2613 rt2800_bbp_read(rt2x00dev, 4, &value);
2614 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
2615 rt2800_bbp_write(rt2x00dev, 4, value);
2616 }
2617
2618 if (rt2800_is_305x_soc(rt2x00dev) ||
2619 rt2x00_rt(rt2x00dev, RT5390))
2339 rt2800_bbp_write(rt2x00dev, 31, 0x08); 2620 rt2800_bbp_write(rt2x00dev, 31, 0x08);
2340 2621
2341 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 2622 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
2342 rt2800_bbp_write(rt2x00dev, 66, 0x38); 2623 rt2800_bbp_write(rt2x00dev, 66, 0x38);
2343 2624
2625 if (rt2x00_rt(rt2x00dev, RT5390))
2626 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
2627
2344 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { 2628 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
2345 rt2800_bbp_write(rt2x00dev, 69, 0x16); 2629 rt2800_bbp_write(rt2x00dev, 69, 0x16);
2346 rt2800_bbp_write(rt2x00dev, 73, 0x12); 2630 rt2800_bbp_write(rt2x00dev, 73, 0x12);
2631 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
2632 rt2800_bbp_write(rt2x00dev, 69, 0x12);
2633 rt2800_bbp_write(rt2x00dev, 73, 0x13);
2634 rt2800_bbp_write(rt2x00dev, 75, 0x46);
2635 rt2800_bbp_write(rt2x00dev, 76, 0x28);
2636 rt2800_bbp_write(rt2x00dev, 77, 0x59);
2347 } else { 2637 } else {
2348 rt2800_bbp_write(rt2x00dev, 69, 0x12); 2638 rt2800_bbp_write(rt2x00dev, 69, 0x12);
2349 rt2800_bbp_write(rt2x00dev, 73, 0x10); 2639 rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -2354,7 +2644,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2354 if (rt2x00_rt(rt2x00dev, RT3070) || 2644 if (rt2x00_rt(rt2x00dev, RT3070) ||
2355 rt2x00_rt(rt2x00dev, RT3071) || 2645 rt2x00_rt(rt2x00dev, RT3071) ||
2356 rt2x00_rt(rt2x00dev, RT3090) || 2646 rt2x00_rt(rt2x00dev, RT3090) ||
2357 rt2x00_rt(rt2x00dev, RT3390)) { 2647 rt2x00_rt(rt2x00dev, RT3390) ||
2648 rt2x00_rt(rt2x00dev, RT5390)) {
2358 rt2800_bbp_write(rt2x00dev, 79, 0x13); 2649 rt2800_bbp_write(rt2x00dev, 79, 0x13);
2359 rt2800_bbp_write(rt2x00dev, 80, 0x05); 2650 rt2800_bbp_write(rt2x00dev, 80, 0x05);
2360 rt2800_bbp_write(rt2x00dev, 81, 0x33); 2651 rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -2366,35 +2657,62 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2366 } 2657 }
2367 2658
2368 rt2800_bbp_write(rt2x00dev, 82, 0x62); 2659 rt2800_bbp_write(rt2x00dev, 82, 0x62);
2369 rt2800_bbp_write(rt2x00dev, 83, 0x6a); 2660 if (rt2x00_rt(rt2x00dev, RT5390))
2661 rt2800_bbp_write(rt2x00dev, 83, 0x7a);
2662 else
2663 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
2370 2664
2371 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) 2665 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
2372 rt2800_bbp_write(rt2x00dev, 84, 0x19); 2666 rt2800_bbp_write(rt2x00dev, 84, 0x19);
2667 else if (rt2x00_rt(rt2x00dev, RT5390))
2668 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
2373 else 2669 else
2374 rt2800_bbp_write(rt2x00dev, 84, 0x99); 2670 rt2800_bbp_write(rt2x00dev, 84, 0x99);
2375 2671
2376 rt2800_bbp_write(rt2x00dev, 86, 0x00); 2672 if (rt2x00_rt(rt2x00dev, RT5390))
2673 rt2800_bbp_write(rt2x00dev, 86, 0x38);
2674 else
2675 rt2800_bbp_write(rt2x00dev, 86, 0x00);
2676
2377 rt2800_bbp_write(rt2x00dev, 91, 0x04); 2677 rt2800_bbp_write(rt2x00dev, 91, 0x04);
2378 rt2800_bbp_write(rt2x00dev, 92, 0x00); 2678
2679 if (rt2x00_rt(rt2x00dev, RT5390))
2680 rt2800_bbp_write(rt2x00dev, 92, 0x02);
2681 else
2682 rt2800_bbp_write(rt2x00dev, 92, 0x00);
2379 2683
2380 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || 2684 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
2381 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || 2685 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
2382 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 2686 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
2383 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || 2687 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
2688 rt2x00_rt(rt2x00dev, RT5390) ||
2384 rt2800_is_305x_soc(rt2x00dev)) 2689 rt2800_is_305x_soc(rt2x00dev))
2385 rt2800_bbp_write(rt2x00dev, 103, 0xc0); 2690 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
2386 else 2691 else
2387 rt2800_bbp_write(rt2x00dev, 103, 0x00); 2692 rt2800_bbp_write(rt2x00dev, 103, 0x00);
2388 2693
2694 if (rt2x00_rt(rt2x00dev, RT5390))
2695 rt2800_bbp_write(rt2x00dev, 104, 0x92);
2696
2389 if (rt2800_is_305x_soc(rt2x00dev)) 2697 if (rt2800_is_305x_soc(rt2x00dev))
2390 rt2800_bbp_write(rt2x00dev, 105, 0x01); 2698 rt2800_bbp_write(rt2x00dev, 105, 0x01);
2699 else if (rt2x00_rt(rt2x00dev, RT5390))
2700 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
2391 else 2701 else
2392 rt2800_bbp_write(rt2x00dev, 105, 0x05); 2702 rt2800_bbp_write(rt2x00dev, 105, 0x05);
2393 rt2800_bbp_write(rt2x00dev, 106, 0x35); 2703
2704 if (rt2x00_rt(rt2x00dev, RT5390))
2705 rt2800_bbp_write(rt2x00dev, 106, 0x03);
2706 else
2707 rt2800_bbp_write(rt2x00dev, 106, 0x35);
2708
2709 if (rt2x00_rt(rt2x00dev, RT5390))
2710 rt2800_bbp_write(rt2x00dev, 128, 0x12);
2394 2711
2395 if (rt2x00_rt(rt2x00dev, RT3071) || 2712 if (rt2x00_rt(rt2x00dev, RT3071) ||
2396 rt2x00_rt(rt2x00dev, RT3090) || 2713 rt2x00_rt(rt2x00dev, RT3090) ||
2397 rt2x00_rt(rt2x00dev, RT3390)) { 2714 rt2x00_rt(rt2x00dev, RT3390) ||
2715 rt2x00_rt(rt2x00dev, RT5390)) {
2398 rt2800_bbp_read(rt2x00dev, 138, &value); 2716 rt2800_bbp_read(rt2x00dev, 138, &value);
2399 2717
2400 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 2718 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@@ -2406,6 +2724,41 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2406 rt2800_bbp_write(rt2x00dev, 138, value); 2724 rt2800_bbp_write(rt2x00dev, 138, value);
2407 } 2725 }
2408 2726
2727 if (rt2x00_rt(rt2x00dev, RT5390)) {
2728 int ant, div_mode;
2729
2730 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
2731 div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
2732 ant = (div_mode == 3) ? 1 : 0;
2733
2734 /* check if this is a Bluetooth combo card */
2735 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
2736 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
2737 u32 reg;
2738
2739 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
2740 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
2741 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
2742 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
2743 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
2744 if (ant == 0)
2745 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
2746 else if (ant == 1)
2747 rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
2748 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
2749 }
2750
2751 rt2800_bbp_read(rt2x00dev, 152, &value);
2752 if (ant == 0)
2753 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
2754 else
2755 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
2756 rt2800_bbp_write(rt2x00dev, 152, value);
2757
2758 /* Init frequency calibration */
2759 rt2800_bbp_write(rt2x00dev, 142, 1);
2760 rt2800_bbp_write(rt2x00dev, 143, 57);
2761 }
2409 2762
2410 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 2763 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
2411 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 2764 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@@ -2436,6 +2789,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
2436 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40); 2789 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
2437 rt2800_bbp_write(rt2x00dev, 4, bbp); 2790 rt2800_bbp_write(rt2x00dev, 4, bbp);
2438 2791
2792 rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
2793 rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
2794 rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
2795
2439 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); 2796 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
2440 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1); 2797 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
2441 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); 2798 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -2491,18 +2848,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2491 !rt2x00_rt(rt2x00dev, RT3071) && 2848 !rt2x00_rt(rt2x00dev, RT3071) &&
2492 !rt2x00_rt(rt2x00dev, RT3090) && 2849 !rt2x00_rt(rt2x00dev, RT3090) &&
2493 !rt2x00_rt(rt2x00dev, RT3390) && 2850 !rt2x00_rt(rt2x00dev, RT3390) &&
2851 !rt2x00_rt(rt2x00dev, RT5390) &&
2494 !rt2800_is_305x_soc(rt2x00dev)) 2852 !rt2800_is_305x_soc(rt2x00dev))
2495 return 0; 2853 return 0;
2496 2854
2497 /* 2855 /*
2498 * Init RF calibration. 2856 * Init RF calibration.
2499 */ 2857 */
2500 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 2858 if (rt2x00_rt(rt2x00dev, RT5390)) {
2501 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2859 rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
2502 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2860 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
2503 msleep(1); 2861 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
2504 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); 2862 msleep(1);
2505 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2863 rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
2864 rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
2865 } else {
2866 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
2867 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
2868 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2869 msleep(1);
2870 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
2871 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2872 }
2506 2873
2507 if (rt2x00_rt(rt2x00dev, RT3070) || 2874 if (rt2x00_rt(rt2x00dev, RT3070) ||
2508 rt2x00_rt(rt2x00dev, RT3071) || 2875 rt2x00_rt(rt2x00dev, RT3071) ||
@@ -2510,7 +2877,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2510 rt2800_rfcsr_write(rt2x00dev, 4, 0x40); 2877 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
2511 rt2800_rfcsr_write(rt2x00dev, 5, 0x03); 2878 rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
2512 rt2800_rfcsr_write(rt2x00dev, 6, 0x02); 2879 rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
2513 rt2800_rfcsr_write(rt2x00dev, 7, 0x70); 2880 rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
2514 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); 2881 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
2515 rt2800_rfcsr_write(rt2x00dev, 10, 0x41); 2882 rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
2516 rt2800_rfcsr_write(rt2x00dev, 11, 0x21); 2883 rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
@@ -2593,6 +2960,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2593 rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 2960 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
2594 rt2800_rfcsr_write(rt2x00dev, 31, 0x00); 2961 rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
2595 return 0; 2962 return 0;
2963 } else if (rt2x00_rt(rt2x00dev, RT5390)) {
2964 rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
2965 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
2966 rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
2967 rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
2968 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
2969 rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
2970 else
2971 rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
2972 rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
2973 rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
2974 rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
2975 rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
2976 rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
2977 rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
2978 rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
2979 rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
2980 rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
2981 rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
2982
2983 rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
2984 rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
2985 rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
2986 rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
2987 rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
2988 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
2989 rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
2990 else
2991 rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
2992 rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
2993 rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
2994 rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
2995 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
2996
2997 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
2998 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
2999 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
3000 rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
3001 rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
3002 rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
3003 rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
3004 rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
3005 rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
3006 rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
3007
3008 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3009 rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
3010 else
3011 rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
3012 rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
3013 rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
3014 rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
3015 rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
3016 rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
3017 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3018 rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
3019 else
3020 rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
3021 rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
3022 rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
3023 rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
3024
3025 rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
3026 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3027 rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
3028 else
3029 rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
3030 rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
3031 rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
3032 rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
3033 rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
3034 rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
3035 rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
3036
3037 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
3038 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
3039 rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
3040 else
3041 rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
3042 rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
3043 rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
2596 } 3044 }
2597 3045
2598 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { 3046 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -2602,12 +3050,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2602 rt2800_register_write(rt2x00dev, LDO_CFG0, reg); 3050 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
2603 } else if (rt2x00_rt(rt2x00dev, RT3071) || 3051 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
2604 rt2x00_rt(rt2x00dev, RT3090)) { 3052 rt2x00_rt(rt2x00dev, RT3090)) {
3053 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
3054
2605 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); 3055 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
2606 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1); 3056 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
2607 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); 3057 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
2608 3058
2609 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
2610
2611 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg); 3059 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
2612 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); 3060 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
2613 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3061 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2619,6 +3067,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2619 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0); 3067 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
2620 } 3068 }
2621 rt2800_register_write(rt2x00dev, LDO_CFG0, reg); 3069 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
3070
3071 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
3072 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
3073 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
2622 } else if (rt2x00_rt(rt2x00dev, RT3390)) { 3074 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
2623 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg); 3075 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
2624 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0); 3076 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
@@ -2642,21 +3094,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2642 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15); 3094 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
2643 } 3095 }
2644 3096
2645 /* 3097 if (!rt2x00_rt(rt2x00dev, RT5390)) {
2646 * Set back to initial state 3098 /*
2647 */ 3099 * Set back to initial state
2648 rt2800_bbp_write(rt2x00dev, 24, 0); 3100 */
3101 rt2800_bbp_write(rt2x00dev, 24, 0);
2649 3102
2650 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); 3103 rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
2651 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); 3104 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
2652 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); 3105 rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
2653 3106
2654 /* 3107 /*
2655 * set BBP back to BW20 3108 * Set BBP back to BW20
2656 */ 3109 */
2657 rt2800_bbp_read(rt2x00dev, 4, &bbp); 3110 rt2800_bbp_read(rt2x00dev, 4, &bbp);
2658 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); 3111 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
2659 rt2800_bbp_write(rt2x00dev, 4, bbp); 3112 rt2800_bbp_write(rt2x00dev, 4, bbp);
3113 }
2660 3114
2661 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || 3115 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
2662 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3116 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2668,24 +3122,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2668 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1); 3122 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
2669 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg); 3123 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
2670 3124
2671 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 3125 if (!rt2x00_rt(rt2x00dev, RT5390)) {
2672 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); 3126 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
2673 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3127 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
2674 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 3128 if (rt2x00_rt(rt2x00dev, RT3070) ||
2675 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 3129 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2676 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) 3130 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
2677 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); 3131 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
2678 } 3132 if (!test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
2679 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); 3133 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
2680 if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1) 3134 }
2681 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, 3135 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
2682 rt2x00_get_field16(eeprom, 3136 if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
2683 EEPROM_TXMIXER_GAIN_BG_VAL)); 3137 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
2684 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 3138 rt2x00_get_field16(eeprom,
3139 EEPROM_TXMIXER_GAIN_BG_VAL));
3140 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
3141 }
2685 3142
2686 if (rt2x00_rt(rt2x00dev, RT3090)) { 3143 if (rt2x00_rt(rt2x00dev, RT3090)) {
2687 rt2800_bbp_read(rt2x00dev, 138, &bbp); 3144 rt2800_bbp_read(rt2x00dev, 138, &bbp);
2688 3145
3146 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
2689 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 3147 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
2690 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) 3148 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
2691 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); 3149 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
@@ -2719,10 +3177,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2719 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); 3177 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
2720 } 3178 }
2721 3179
2722 if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) { 3180 if (rt2x00_rt(rt2x00dev, RT3070)) {
2723 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr); 3181 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
2724 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || 3182 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
2725 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
2726 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3); 3183 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
2727 else 3184 else
2728 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0); 3185 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
@@ -2732,6 +3189,20 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2732 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr); 3189 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
2733 } 3190 }
2734 3191
3192 if (rt2x00_rt(rt2x00dev, RT5390)) {
3193 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
3194 rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
3195 rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
3196
3197 rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
3198 rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
3199 rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
3200
3201 rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
3202 rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
3203 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
3204 }
3205
2735 return 0; 3206 return 0;
2736} 3207}
2737 3208
@@ -2810,10 +3281,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
2810 3281
2811 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 3282 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
2812 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); 3283 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
2813 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
2814 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); 3284 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
2815 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
2816 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2817 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 3285 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2818 3286
2819 /* Wait for DMA, ignore error */ 3287 /* Wait for DMA, ignore error */
@@ -2823,9 +3291,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
2823 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0); 3291 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
2824 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); 3292 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
2825 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 3293 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2826
2827 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
2828 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
2829} 3294}
2830EXPORT_SYMBOL_GPL(rt2800_disable_radio); 3295EXPORT_SYMBOL_GPL(rt2800_disable_radio);
2831 3296
@@ -2986,13 +3451,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2986 default_lna_gain); 3451 default_lna_gain);
2987 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); 3452 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
2988 3453
2989 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
2990 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
2991 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
2992 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
2993 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
2994 rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
2995
2996 return 0; 3454 return 0;
2997} 3455}
2998EXPORT_SYMBOL_GPL(rt2800_validate_eeprom); 3456EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@@ -3009,10 +3467,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3009 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 3467 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
3010 3468
3011 /* 3469 /*
3012 * Identify RF chipset. 3470 * Identify RF chipset by EEPROM value
3471 * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
3472 * RT53xx: defined in "EEPROM_CHIP_ID" field
3013 */ 3473 */
3014 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
3015 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 3474 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
3475 if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
3476 rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
3477 else
3478 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
3016 3479
3017 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), 3480 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
3018 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); 3481 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
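Note on the hunk above: the RT value still comes from MAC_CSR0, but the RF value is now sourced per generation exactly as the new comment says — RT28xx/RT30xx keep it in the RF_TYPE field of EEPROM_NIC_CONF0, while RT53xx carries it in EEPROM_CHIP_ID. A rough standalone sketch of that selection, with a flattened struct and invented names standing in for the register and EEPROM accessors:

#include <stdint.h>

#define RT5390  0x5390

/* Hypothetical flattened view of the probe-time inputs. */
struct probe_ids {
        uint16_t mac_csr0_chipset;      /* RT value read from MAC_CSR0 */
        uint16_t nic_conf0_rf_type;     /* RF type field, RT28xx/RT30xx */
        uint16_t eeprom_chip_id;        /* RF value, RT53xx parts */
};

uint16_t identify_rf(const struct probe_ids *ids)
{
        /* RT53xx keeps the RF identifier in its own EEPROM word. */
        if (ids->mac_csr0_chipset == RT5390)
                return ids->eeprom_chip_id;

        /* Older chips encode it inside EEPROM_NIC_CONF0. */
        return ids->nic_conf0_rf_type;
}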
@@ -3024,7 +3487,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3024 !rt2x00_rt(rt2x00dev, RT3071) && 3487 !rt2x00_rt(rt2x00dev, RT3071) &&
3025 !rt2x00_rt(rt2x00dev, RT3090) && 3488 !rt2x00_rt(rt2x00dev, RT3090) &&
3026 !rt2x00_rt(rt2x00dev, RT3390) && 3489 !rt2x00_rt(rt2x00dev, RT3390) &&
3027 !rt2x00_rt(rt2x00dev, RT3572)) { 3490 !rt2x00_rt(rt2x00dev, RT3572) &&
3491 !rt2x00_rt(rt2x00dev, RT5390)) {
3028 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 3492 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
3029 return -ENODEV; 3493 return -ENODEV;
3030 } 3494 }
@@ -3038,7 +3502,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3038 !rt2x00_rf(rt2x00dev, RF3021) && 3502 !rt2x00_rf(rt2x00dev, RF3021) &&
3039 !rt2x00_rf(rt2x00dev, RF3022) && 3503 !rt2x00_rf(rt2x00dev, RF3022) &&
3040 !rt2x00_rf(rt2x00dev, RF3052) && 3504 !rt2x00_rf(rt2x00dev, RF3052) &&
3041 !rt2x00_rf(rt2x00dev, RF3320)) { 3505 !rt2x00_rf(rt2x00dev, RF3320) &&
3506 !rt2x00_rf(rt2x00dev, RF5390)) {
3042 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3507 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
3043 return -ENODEV; 3508 return -ENODEV;
3044 } 3509 }
@@ -3046,11 +3511,35 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3046 /* 3511 /*
3047 * Identify default antenna configuration. 3512 * Identify default antenna configuration.
3048 */ 3513 */
3049 rt2x00dev->default_ant.tx = 3514 rt2x00dev->default_ant.tx_chain_num =
3050 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH); 3515 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
3051 rt2x00dev->default_ant.rx = 3516 rt2x00dev->default_ant.rx_chain_num =
3052 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH); 3517 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
3053 3518
3519 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
3520
3521 if (rt2x00_rt(rt2x00dev, RT3070) ||
3522 rt2x00_rt(rt2x00dev, RT3090) ||
3523 rt2x00_rt(rt2x00dev, RT3390)) {
3524 value = rt2x00_get_field16(eeprom,
3525 EEPROM_NIC_CONF1_ANT_DIVERSITY);
3526 switch (value) {
3527 case 0:
3528 case 1:
3529 case 2:
3530 rt2x00dev->default_ant.tx = ANTENNA_A;
3531 rt2x00dev->default_ant.rx = ANTENNA_A;
3532 break;
3533 case 3:
3534 rt2x00dev->default_ant.tx = ANTENNA_A;
3535 rt2x00dev->default_ant.rx = ANTENNA_B;
3536 break;
3537 }
3538 } else {
3539 rt2x00dev->default_ant.tx = ANTENNA_A;
3540 rt2x00dev->default_ant.rx = ANTENNA_A;
3541 }
3542
3054 /* 3543 /*
3055 * Read frequency offset and RF programming sequence. 3544 * Read frequency offset and RF programming sequence.
3056 */ 3545 */
@@ -3084,6 +3573,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3084 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg); 3573 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
3085#endif /* CONFIG_RT2X00_LIB_LEDS */ 3574#endif /* CONFIG_RT2X00_LIB_LEDS */
3086 3575
3576 /*
3577 * Check if support EIRP tx power limit feature.
3578 */
3579 rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
3580
3581 if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
3582 EIRP_MAX_TX_POWER_LIMIT)
3583 __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags);
3584
3087 return 0; 3585 return 0;
3088} 3586}
3089EXPORT_SYMBOL_GPL(rt2800_init_eeprom); 3587EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
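The new EIRP check above only sets CONFIG_SUPPORT_POWER_LIMIT when the 2.4 GHz value read from EEPROM_EIRP_MAX_TX_POWER is below EIRP_MAX_TX_POWER_LIMIT, i.e. when the EEPROM actually carries a plausible regulatory cap; an unprogrammed word (all ones) can never pass the comparison, so the flag stays clear. A compact sketch of that gate — the threshold here is an assumed value for illustration, the driver compares against the EIRP_MAX_TX_POWER_LIMIT constant from rt2800.h:

#include <stdbool.h>
#include <stdint.h>

/* Assumed threshold for illustration only. */
#define EIRP_LIMIT      0x50

bool supports_power_limit(uint16_t eirp_word_2ghz)
{
        /*
         * An unprogrammed EEPROM word reads back as all ones, which is
         * never below the limit, so the feature flag stays clear.
         */
        return eirp_word_2ghz < EIRP_LIMIT;
}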
@@ -3236,7 +3734,6 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3236 char *default_power1; 3734 char *default_power1;
3237 char *default_power2; 3735 char *default_power2;
3238 unsigned int i; 3736 unsigned int i;
3239 unsigned short max_power;
3240 u16 eeprom; 3737 u16 eeprom;
3241 3738
3242 /* 3739 /*
@@ -3303,7 +3800,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3303 rt2x00_rf(rt2x00dev, RF2020) || 3800 rt2x00_rf(rt2x00dev, RF2020) ||
3304 rt2x00_rf(rt2x00dev, RF3021) || 3801 rt2x00_rf(rt2x00dev, RF3021) ||
3305 rt2x00_rf(rt2x00dev, RF3022) || 3802 rt2x00_rf(rt2x00dev, RF3022) ||
3306 rt2x00_rf(rt2x00dev, RF3320)) { 3803 rt2x00_rf(rt2x00dev, RF3320) ||
3804 rt2x00_rf(rt2x00dev, RF5390)) {
3307 spec->num_channels = 14; 3805 spec->num_channels = 14;
3308 spec->channels = rf_vals_3x; 3806 spec->channels = rf_vals_3x;
3309 } else if (rt2x00_rf(rt2x00dev, RF3052)) { 3807 } else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -3361,26 +3859,21 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3361 3859
3362 spec->channels_info = info; 3860 spec->channels_info = info;
3363 3861
3364 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
3365 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
3366 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); 3862 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
3367 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); 3863 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
3368 3864
3369 for (i = 0; i < 14; i++) { 3865 for (i = 0; i < 14; i++) {
3370 info[i].max_power = max_power; 3866 info[i].default_power1 = default_power1[i];
3371 info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]); 3867 info[i].default_power2 = default_power2[i];
3372 info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
3373 } 3868 }
3374 3869
3375 if (spec->num_channels > 14) { 3870 if (spec->num_channels > 14) {
3376 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
3377 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); 3871 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
3378 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); 3872 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
3379 3873
3380 for (i = 14; i < spec->num_channels; i++) { 3874 for (i = 14; i < spec->num_channels; i++) {
3381 info[i].max_power = max_power; 3875 info[i].default_power1 = default_power1[i];
3382 info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]); 3876 info[i].default_power2 = default_power2[i];
3383 info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
3384 } 3877 }
3385 } 3878 }
3386 3879
@@ -3530,7 +4023,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf);
3530 4023
3531int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4024int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3532 enum ieee80211_ampdu_mlme_action action, 4025 enum ieee80211_ampdu_mlme_action action,
3533 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 4026 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4027 u8 buf_size)
3534{ 4028{
3535 int ret = 0; 4029 int ret = 0;
3536 4030
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index e3c995a9dec..0c92d86a36f 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
156void rt2800_txdone_entry(struct queue_entry *entry, u32 status); 156void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
157 157
158void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); 158void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
159void rt2800_clear_beacon(struct queue_entry *entry);
159 160
160extern const struct rt2x00debug rt2800_rt2x00debug; 161extern const struct rt2x00debug rt2800_rt2x00debug;
161 162
@@ -198,7 +199,8 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
198u64 rt2800_get_tsf(struct ieee80211_hw *hw); 199u64 rt2800_get_tsf(struct ieee80211_hw *hw);
199int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 200int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
200 enum ieee80211_ampdu_mlme_action action, 201 enum ieee80211_ampdu_mlme_action action,
201 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 202 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
203 u8 buf_size);
202int rt2800_get_survey(struct ieee80211_hw *hw, int idx, 204int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
203 struct survey_info *survey); 205 struct survey_info *survey);
204 206
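Both the prototype above and the rt2800lib.c implementation gain a buf_size argument: mac80211 now hands drivers the negotiated BlockAck reorder-window size when an aggregation session becomes operational, so a driver that builds A-MPDUs in software can cap its aggregate length. Only the signature changes in this patch. The sketch below shows the usual shape of such a callback in a driver of this era; the mac80211 enum values and ieee80211_start_tx_ba_cb_irqsafe() are the real API, everything else is illustrative rather than rt2800's actual body.

#include <net/mac80211.h>

/* Sketch of the callback shape only, not rt2800_ampdu_action() itself. */
static int demo_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             enum ieee80211_ampdu_mlme_action action,
                             struct ieee80211_sta *sta, u16 tid, u16 *ssn,
                             u8 buf_size)
{
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
                /* Hardware aggregates by itself; just confirm to mac80211. */
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
                /*
                 * buf_size is the negotiated BlockAck reorder window; a
                 * driver building A-MPDUs in software would clamp its
                 * aggregate length to it here.
                 */
                break;
        default:
                break;
        }

        return 0;
}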
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 3b3f1e45ab3..38605e9fe42 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -200,11 +200,22 @@ static void rt2800pci_start_queue(struct data_queue *queue)
200 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 200 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
201 break; 201 break;
202 case QID_BEACON: 202 case QID_BEACON:
203 /*
204 * Allow beacon tasklets to be scheduled for periodic
205 * beacon updates.
206 */
207 tasklet_enable(&rt2x00dev->tbtt_tasklet);
208 tasklet_enable(&rt2x00dev->pretbtt_tasklet);
209
203 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 210 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
204 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 211 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
205 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); 212 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
206 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 213 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
207 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 214 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
215
216 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
217 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
218 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
208 break; 219 break;
209 default: 220 default:
210 break; 221 break;
@@ -250,6 +261,16 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
250 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); 261 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
251 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 262 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
252 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 263 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
264
265 rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
266 rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
267 rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
268
269 /*
270 * Wait for tbtt tasklets to finish.
271 */
272 tasklet_disable(&rt2x00dev->tbtt_tasklet);
273 tasklet_disable(&rt2x00dev->pretbtt_tasklet);
253 break; 274 break;
254 default: 275 default:
255 break; 276 break;
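Starting the beacon queue now also enables the tbtt/pretbtt tasklets and arms the PRE_TBTT timer via INT_TIMER_EN; stopping the queue reverses both, and tasklet_disable() doubles as a barrier because it waits for a tasklet that is already running. A minimal sketch of that pairing — the demo_* structure and the hardware steps are placeholders, only the tasklet calls are the real kernel API, and the tasklet is assumed to have been set up with tasklet_init() at probe time:

#include <linux/interrupt.h>
#include <linux/types.h>

/* Illustrative device state, not the rt2x00 structures. */
struct demo_dev {
        struct tasklet_struct tbtt_tasklet;
        bool beaconing;
};

static void demo_start_beacon_queue(struct demo_dev *dev)
{
        /* Allow periodic beacon work to be scheduled again. */
        tasklet_enable(&dev->tbtt_tasklet);
        dev->beaconing = true;
        /* ...then enable TBTT generation and the PRE_TBTT timer in hardware. */
}

static void demo_stop_beacon_queue(struct demo_dev *dev)
{
        /* ...hardware TBTT generation and PRE_TBTT timer switched off first... */
        dev->beaconing = false;

        /*
         * tasklet_disable() waits for a tasklet that is already running,
         * so past this point no beacon update races with the stopped queue.
         */
        tasklet_disable(&dev->tbtt_tasklet);
}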
@@ -397,9 +418,9 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
397static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 418static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
398 enum dev_state state) 419 enum dev_state state)
399{ 420{
400 int mask = (state == STATE_RADIO_IRQ_ON) || 421 int mask = (state == STATE_RADIO_IRQ_ON);
401 (state == STATE_RADIO_IRQ_ON_ISR);
402 u32 reg; 422 u32 reg;
423 unsigned long flags;
403 424
404 /* 425 /*
405 * When interrupts are being enabled, the interrupt registers 426 * When interrupts are being enabled, the interrupt registers
@@ -408,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
408 if (state == STATE_RADIO_IRQ_ON) { 429 if (state == STATE_RADIO_IRQ_ON) {
409 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 430 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
410 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg); 431 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
432
433 /*
434 * Enable tasklets. The beacon related tasklets are
435 * enabled when the beacon queue is started.
436 */
437 tasklet_enable(&rt2x00dev->txstatus_tasklet);
438 tasklet_enable(&rt2x00dev->rxdone_tasklet);
439 tasklet_enable(&rt2x00dev->autowake_tasklet);
411 } 440 }
412 441
442 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
413 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 443 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
414 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0); 444 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
415 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0); 445 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
@@ -430,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
430 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0); 460 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
431 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0); 461 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
432 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 462 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
463 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
464
465 if (state == STATE_RADIO_IRQ_OFF) {
466 /*
467 * Ensure that all tasklets are finished before
468 * disabling the interrupts.
469 */
470 tasklet_disable(&rt2x00dev->txstatus_tasklet);
471 tasklet_disable(&rt2x00dev->rxdone_tasklet);
472 tasklet_disable(&rt2x00dev->autowake_tasklet);
473 }
433} 474}
434 475
435static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev) 476static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -452,6 +493,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
452 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); 493 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
453 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 494 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
454 495
496 if (rt2x00_rt(rt2x00dev, RT5390)) {
497 rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
498 rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
499 rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
500 rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
501 }
502
455 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 503 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
456 504
457 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 505 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -475,39 +523,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
475 523
476static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) 524static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
477{ 525{
478 u32 reg; 526 if (rt2x00_is_soc(rt2x00dev)) {
479 527 rt2800_disable_radio(rt2x00dev);
480 rt2800_disable_radio(rt2x00dev); 528 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
481 529 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
482 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); 530 }
483
484 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
485 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
486 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
487 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
488 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
489 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
490 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
491 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
492 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
493
494 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
495 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
496} 531}
497 532
498static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 533static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
499 enum dev_state state) 534 enum dev_state state)
500{ 535{
501 /*
502 * Always put the device to sleep (even when we intend to wakeup!)
503 * if the device is booting and wasn't asleep it will return
504 * failure when attempting to wakeup.
505 */
506 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
507
508 if (state == STATE_AWAKE) { 536 if (state == STATE_AWAKE) {
509 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); 537 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
510 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); 538 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
539 } else if (state == STATE_SLEEP) {
540 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff);
541 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff);
542 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
511 } 543 }
512 544
513 return 0; 545 return 0;
@@ -538,9 +570,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
538 rt2800pci_set_state(rt2x00dev, STATE_SLEEP); 570 rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
539 break; 571 break;
540 case STATE_RADIO_IRQ_ON: 572 case STATE_RADIO_IRQ_ON:
541 case STATE_RADIO_IRQ_ON_ISR:
542 case STATE_RADIO_IRQ_OFF: 573 case STATE_RADIO_IRQ_OFF:
543 case STATE_RADIO_IRQ_OFF_ISR:
544 rt2800pci_toggle_irq(rt2x00dev, state); 574 rt2800pci_toggle_irq(rt2x00dev, state);
545 break; 575 break;
546 case STATE_DEEP_SLEEP: 576 case STATE_DEEP_SLEEP:
@@ -732,45 +762,60 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
732 } 762 }
733} 763}
734 764
735static void rt2800pci_txstatus_tasklet(unsigned long data) 765static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
766 struct rt2x00_field32 irq_field)
736{ 767{
737 rt2800pci_txdone((struct rt2x00_dev *)data); 768 unsigned long flags;
738} 769 u32 reg;
739
740static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
741{
742 struct rt2x00_dev *rt2x00dev = dev_instance;
743 u32 reg = rt2x00dev->irqvalue[0];
744 770
745 /* 771 /*
746 * 1 - Pre TBTT interrupt. 772 * Enable a single interrupt. The interrupt mask register
773 * access needs locking.
747 */ 774 */
748 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) 775 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
749 rt2x00lib_pretbtt(rt2x00dev); 776 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
777 rt2x00_set_field32(&reg, irq_field, 1);
778 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
779 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
780}
750 781
751 /* 782static void rt2800pci_txstatus_tasklet(unsigned long data)
752 * 2 - Beacondone interrupt. 783{
753 */ 784 rt2800pci_txdone((struct rt2x00_dev *)data);
754 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
755 rt2x00lib_beacondone(rt2x00dev);
756 785
757 /* 786 /*
758 * 3 - Rx ring done interrupt. 787 * No need to enable the tx status interrupt here as we always
788 * leave it enabled to minimize the possibility of a tx status
789 * register overflow. See comment in interrupt handler.
759 */ 790 */
760 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) 791}
761 rt2x00pci_rxdone(rt2x00dev);
762 792
763 /* 793static void rt2800pci_pretbtt_tasklet(unsigned long data)
764 * 4 - Auto wakeup interrupt. 794{
765 */ 795 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
766 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) 796 rt2x00lib_pretbtt(rt2x00dev);
767 rt2800pci_wakeup(rt2x00dev); 797 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
798}
768 799
769 /* Enable interrupts again. */ 800static void rt2800pci_tbtt_tasklet(unsigned long data)
770 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 801{
771 STATE_RADIO_IRQ_ON_ISR); 802 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
803 rt2x00lib_beacondone(rt2x00dev);
804 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
805}
772 806
773 return IRQ_HANDLED; 807static void rt2800pci_rxdone_tasklet(unsigned long data)
808{
809 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
810 rt2x00pci_rxdone(rt2x00dev);
811 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
812}
813
814static void rt2800pci_autowake_tasklet(unsigned long data)
815{
816 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
817 rt2800pci_wakeup(rt2x00dev);
818 rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
774} 819}
775 820
776static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) 821static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
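The threaded-IRQ bottom half is replaced here by per-source tasklets: the hard handler masks a pending source and schedules its tasklet, and the tasklet re-enables exactly that source once its work is drained, taking irqmask_lock around the INT_MASK_CSR read-modify-write (the tx status source is the exception and is never masked, per the comment above). A stripped-down sketch of that handshake; the demo_* names and the bit value are invented, the locking mirrors rt2800pci_enable_interrupt():

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Placeholder mirror of the interrupt-mask state, not rt2x00_dev. */
struct demo_irq {
        spinlock_t mask_lock;
        u32 mask;                       /* software copy of INT_MASK_CSR */
        struct tasklet_struct rxdone;
};

#define DEMO_IRQ_RXDONE         BIT(2)  /* made-up bit position */

static void demo_enable_source(struct demo_irq *di, u32 bit)
{
        unsigned long flags;

        /* Same lock the hard handler takes, so the RMW cannot be torn. */
        spin_lock_irqsave(&di->mask_lock, flags);
        di->mask |= bit;                /* would be written back to the chip */
        spin_unlock_irqrestore(&di->mask_lock, flags);
}

static void demo_rxdone_tasklet(unsigned long data)
{
        struct demo_irq *di = (struct demo_irq *)data;

        /* ...process the completed rx descriptors... */

        /* Work drained: let the hardware raise RX_DONE again. */
        demo_enable_source(di, DEMO_IRQ_RXDONE);
}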
@@ -816,8 +861,8 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
816static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) 861static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
817{ 862{
818 struct rt2x00_dev *rt2x00dev = dev_instance; 863 struct rt2x00_dev *rt2x00dev = dev_instance;
819 u32 reg; 864 u32 reg, mask;
820 irqreturn_t ret = IRQ_HANDLED; 865 unsigned long flags;
821 866
822 /* Read status and ACK all interrupts */ 867 /* Read status and ACK all interrupts */
823 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 868 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -829,38 +874,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
829 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 874 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
830 return IRQ_HANDLED; 875 return IRQ_HANDLED;
831 876
832 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) 877 /*
833 rt2800pci_txstatus_interrupt(rt2x00dev); 878 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
879 * for interrupts and interrupt masks we can just use the value of
880 * INT_SOURCE_CSR to create the interrupt mask.
881 */
882 mask = ~reg;
834 883
835 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) || 884 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
836 rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) || 885 rt2800pci_txstatus_interrupt(rt2x00dev);
837 rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
838 rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
839 /* 886 /*
840 * All other interrupts are handled in the interrupt thread. 887 * Never disable the TX_FIFO_STATUS interrupt.
841 * Store irqvalue for use in the interrupt thread.
842 */ 888 */
843 rt2x00dev->irqvalue[0] = reg; 889 rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
890 }
844 891
845 /* 892 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
846 * Disable interrupts, will be enabled again in the 893 tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
847 * interrupt thread.
848 */
849 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
850 STATE_RADIO_IRQ_OFF_ISR);
851 894
852 /* 895 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
853 * Leave the TX_FIFO_STATUS interrupt enabled to not lose any 896 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
854 * tx status reports.
855 */
856 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
857 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
858 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
859 897
860 ret = IRQ_WAKE_THREAD; 898 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
861 } 899 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
862 900
863 return ret; 901 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
902 tasklet_schedule(&rt2x00dev->autowake_tasklet);
903
904 /*
905 * Disable all interrupts for which a tasklet was scheduled right now,
906 * the tasklet will reenable the appropriate interrupts.
907 */
908 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
909 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
910 reg &= mask;
911 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
912 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
913
914 return IRQ_HANDLED;
864} 915}
865 916
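The rewritten hard handler leans on INT_SOURCE_CSR and INT_MASK_CSR sharing one bit layout: the complement of the pending-source word, ANDed into the current mask, disables exactly the sources whose tasklets were just scheduled, while the TX_FIFO_STATUS bit is kept set in the mask so tx status reporting is never switched off; with no threaded part left, the handler always returns IRQ_HANDLED. A tiny plain-C illustration of that masking arithmetic, with made-up bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t source = 0x00000005;   /* say: RX_DONE and TBTT pending    */
        uint32_t mask   = 0x0000001f;   /* currently enabled sources        */
        uint32_t txfifo = 0x00000010;   /* TX_FIFO_STATUS, never masked off */

        /* Same bit layout in both registers, so ~source is a valid update. */
        uint32_t keep = ~source | txfifo;

        mask &= keep;                   /* scheduled sources now disabled */
        printf("new mask = 0x%08x\n", mask);    /* prints 0x0000001a */
        return 0;
}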
866/* 917/*
@@ -975,8 +1026,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
975 1026
976static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { 1027static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
977 .irq_handler = rt2800pci_interrupt, 1028 .irq_handler = rt2800pci_interrupt,
978 .irq_handler_thread = rt2800pci_interrupt_thread, 1029 .txstatus_tasklet = rt2800pci_txstatus_tasklet,
979 .txstatus_tasklet = rt2800pci_txstatus_tasklet, 1030 .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
1031 .tbtt_tasklet = rt2800pci_tbtt_tasklet,
1032 .rxdone_tasklet = rt2800pci_rxdone_tasklet,
1033 .autowake_tasklet = rt2800pci_autowake_tasklet,
980 .probe_hw = rt2800pci_probe_hw, 1034 .probe_hw = rt2800pci_probe_hw,
981 .get_firmware_name = rt2800pci_get_firmware_name, 1035 .get_firmware_name = rt2800pci_get_firmware_name,
982 .check_firmware = rt2800_check_firmware, 1036 .check_firmware = rt2800_check_firmware,
@@ -996,6 +1050,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
996 .write_tx_desc = rt2800pci_write_tx_desc, 1050 .write_tx_desc = rt2800pci_write_tx_desc,
997 .write_tx_data = rt2800_write_tx_data, 1051 .write_tx_data = rt2800_write_tx_data,
998 .write_beacon = rt2800_write_beacon, 1052 .write_beacon = rt2800_write_beacon,
1053 .clear_beacon = rt2800_clear_beacon,
999 .fill_rxdone = rt2800pci_fill_rxdone, 1054 .fill_rxdone = rt2800pci_fill_rxdone,
1000 .config_shared_key = rt2800_config_shared_key, 1055 .config_shared_key = rt2800_config_shared_key,
1001 .config_pairwise_key = rt2800_config_pairwise_key, 1056 .config_pairwise_key = rt2800_config_pairwise_key,
@@ -1079,6 +1134,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1079 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1134 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
1080 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1135 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
1081#endif 1136#endif
1137#ifdef CONFIG_RT2800PCI_RT53XX
1138 { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
1139#endif
1082 { 0, } 1140 { 0, }
1083}; 1141};
1084#endif /* CONFIG_PCI */ 1142#endif /* CONFIG_PCI */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 197a36c05fd..5d91561e0de 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -253,9 +253,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
253 rt2800usb_set_state(rt2x00dev, STATE_SLEEP); 253 rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
254 break; 254 break;
255 case STATE_RADIO_IRQ_ON: 255 case STATE_RADIO_IRQ_ON:
256 case STATE_RADIO_IRQ_ON_ISR:
257 case STATE_RADIO_IRQ_OFF: 256 case STATE_RADIO_IRQ_OFF:
258 case STATE_RADIO_IRQ_OFF_ISR:
259 /* No support, but no error either */ 257 /* No support, but no error either */
260 break; 258 break;
261 case STATE_DEEP_SLEEP: 259 case STATE_DEEP_SLEEP:
@@ -639,6 +637,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
639 .write_tx_desc = rt2800usb_write_tx_desc, 637 .write_tx_desc = rt2800usb_write_tx_desc,
640 .write_tx_data = rt2800usb_write_tx_data, 638 .write_tx_data = rt2800usb_write_tx_data,
641 .write_beacon = rt2800_write_beacon, 639 .write_beacon = rt2800_write_beacon,
640 .clear_beacon = rt2800_clear_beacon,
642 .get_tx_data_len = rt2800usb_get_tx_data_len, 641 .get_tx_data_len = rt2800usb_get_tx_data_len,
643 .fill_rxdone = rt2800usb_fill_rxdone, 642 .fill_rxdone = rt2800usb_fill_rxdone,
644 .config_shared_key = rt2800_config_shared_key, 643 .config_shared_key = rt2800_config_shared_key,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 84aaf393da4..1df432c1f2c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -189,6 +189,7 @@ struct rt2x00_chip {
189#define RT3572 0x3572 189#define RT3572 0x3572
190#define RT3593 0x3593 /* PCIe */ 190#define RT3593 0x3593 /* PCIe */
191#define RT3883 0x3883 /* WSOC */ 191#define RT3883 0x3883 /* WSOC */
192#define RT5390 0x5390 /* 2.4GHz */
192 193
193 u16 rf; 194 u16 rf;
194 u16 rev; 195 u16 rev;
@@ -225,6 +226,8 @@ struct channel_info {
225struct antenna_setup { 226struct antenna_setup {
226 enum antenna rx; 227 enum antenna rx;
227 enum antenna tx; 228 enum antenna tx;
229 u8 rx_chain_num;
230 u8 tx_chain_num;
228}; 231};
229 232
230/* 233/*
@@ -368,6 +371,7 @@ struct rt2x00_intf {
368 * dedicated beacon entry. 371 * dedicated beacon entry.
369 */ 372 */
370 struct queue_entry *beacon; 373 struct queue_entry *beacon;
374 bool enable_beacon;
371 375
372 /* 376 /*
373 * Actions that needed rescheduling. 377 * Actions that needed rescheduling.
@@ -511,14 +515,13 @@ struct rt2x00lib_ops {
511 irq_handler_t irq_handler; 515 irq_handler_t irq_handler;
512 516
513 /* 517 /*
514 * Threaded Interrupt handlers.
515 */
516 irq_handler_t irq_handler_thread;
517
518 /*
519 * TX status tasklet handler. 518 * TX status tasklet handler.
520 */ 519 */
521 void (*txstatus_tasklet) (unsigned long data); 520 void (*txstatus_tasklet) (unsigned long data);
521 void (*pretbtt_tasklet) (unsigned long data);
522 void (*tbtt_tasklet) (unsigned long data);
523 void (*rxdone_tasklet) (unsigned long data);
524 void (*autowake_tasklet) (unsigned long data);
522 525
523 /* 526 /*
524 * Device init handlers. 527 * Device init handlers.
@@ -573,6 +576,7 @@ struct rt2x00lib_ops {
573 struct txentry_desc *txdesc); 576 struct txentry_desc *txdesc);
574 void (*write_beacon) (struct queue_entry *entry, 577 void (*write_beacon) (struct queue_entry *entry,
575 struct txentry_desc *txdesc); 578 struct txentry_desc *txdesc);
579 void (*clear_beacon) (struct queue_entry *entry);
576 int (*get_tx_data_len) (struct queue_entry *entry); 580 int (*get_tx_data_len) (struct queue_entry *entry);
577 581
578 /* 582 /*
@@ -664,6 +668,7 @@ enum rt2x00_flags {
664 */ 668 */
665 CONFIG_SUPPORT_HW_BUTTON, 669 CONFIG_SUPPORT_HW_BUTTON,
666 CONFIG_SUPPORT_HW_CRYPTO, 670 CONFIG_SUPPORT_HW_CRYPTO,
671 CONFIG_SUPPORT_POWER_LIMIT,
667 DRIVER_SUPPORT_CONTROL_FILTERS, 672 DRIVER_SUPPORT_CONTROL_FILTERS,
668 DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, 673 DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL,
669 DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, 674 DRIVER_SUPPORT_PRE_TBTT_INTERRUPT,
@@ -788,10 +793,12 @@ struct rt2x00_dev {
788 * - Open ap interface count. 793 * - Open ap interface count.
789 * - Open sta interface count. 794 * - Open sta interface count.
790 * - Association count. 795 * - Association count.
796 * - Beaconing enabled count.
791 */ 797 */
792 unsigned int intf_ap_count; 798 unsigned int intf_ap_count;
793 unsigned int intf_sta_count; 799 unsigned int intf_sta_count;
794 unsigned int intf_associated; 800 unsigned int intf_associated;
801 unsigned int intf_beaconing;
795 802
796 /* 803 /*
797 * Link quality 804 * Link quality
@@ -857,6 +864,13 @@ struct rt2x00_dev {
857 */ 864 */
858 struct ieee80211_low_level_stats low_level_stats; 865 struct ieee80211_low_level_stats low_level_stats;
859 866
867 /**
868 * Work queue for all work which should not be placed
869 * on the mac80211 workqueue (because of dependencies
870 * between various work structures).
871 */
872 struct workqueue_struct *workqueue;
873
860 /* 874 /*
861 * Scheduled work. 875 * Scheduled work.
862 * NOTE: intf_work will use ieee80211_iterate_active_interfaces() 876 * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
@@ -887,12 +901,6 @@ struct rt2x00_dev {
887 const struct firmware *fw; 901 const struct firmware *fw;
888 902
889 /* 903 /*
890 * Interrupt values, stored between interrupt service routine
891 * and interrupt thread routine.
892 */
893 u32 irqvalue[2];
894
895 /*
896 * FIFO for storing tx status reports between isr and tasklet. 904 * FIFO for storing tx status reports between isr and tasklet.
897 */ 905 */
898 DECLARE_KFIFO_PTR(txstatus_fifo, u32); 906 DECLARE_KFIFO_PTR(txstatus_fifo, u32);
@@ -901,6 +909,15 @@ struct rt2x00_dev {
901 * Tasklet for processing tx status reports (rt2800pci). 909 * Tasklet for processing tx status reports (rt2800pci).
902 */ 910 */
903 struct tasklet_struct txstatus_tasklet; 911 struct tasklet_struct txstatus_tasklet;
912 struct tasklet_struct pretbtt_tasklet;
913 struct tasklet_struct tbtt_tasklet;
914 struct tasklet_struct rxdone_tasklet;
915 struct tasklet_struct autowake_tasklet;
916
917 /*
918 * Protect the interrupt mask register.
919 */
920 spinlock_t irqmask_lock;
904}; 921};
905 922
906/* 923/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9597a03242c..9de9dbe9439 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -121,7 +121,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
121 return; 121 return;
122 122
123 if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags)) 123 if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
124 rt2x00queue_update_beacon(rt2x00dev, vif, true); 124 rt2x00queue_update_beacon(rt2x00dev, vif);
125} 125}
126 126
127static void rt2x00lib_intf_scheduled(struct work_struct *work) 127static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -174,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
174 vif->type != NL80211_IFTYPE_WDS) 174 vif->type != NL80211_IFTYPE_WDS)
175 return; 175 return;
176 176
177 rt2x00queue_update_beacon(rt2x00dev, vif, true); 177 /*
178 * Update the beacon without locking. This is safe on PCI devices
179 * as they only update the beacon periodically here. This should
180 * never be called for USB devices.
181 */
182 WARN_ON(rt2x00_is_usb(rt2x00dev));
183 rt2x00queue_update_beacon_locked(rt2x00dev, vif);
178} 184}
179 185
180void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) 186void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -183,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
183 return; 189 return;
184 190
185 /* send buffered bc/mc frames out for every bssid */ 191 /* send buffered bc/mc frames out for every bssid */
186 ieee80211_iterate_active_interfaces(rt2x00dev->hw, 192 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
187 rt2x00lib_bc_buffer_iter, 193 rt2x00lib_bc_buffer_iter,
188 rt2x00dev); 194 rt2x00dev);
189 /* 195 /*
190 * Devices with pre tbtt interrupt don't need to update the beacon 196 * Devices with pre tbtt interrupt don't need to update the beacon
191 * here as they will fetch the next beacon directly prior to 197 * here as they will fetch the next beacon directly prior to
@@ -195,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
195 return; 201 return;
196 202
197 /* fetch next beacon */ 203 /* fetch next beacon */
198 ieee80211_iterate_active_interfaces(rt2x00dev->hw, 204 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
199 rt2x00lib_beaconupdate_iter, 205 rt2x00lib_beaconupdate_iter,
200 rt2x00dev); 206 rt2x00dev);
201} 207}
202EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); 208EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
203 209
@@ -207,9 +213,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
207 return; 213 return;
208 214
209 /* fetch next beacon */ 215 /* fetch next beacon */
210 ieee80211_iterate_active_interfaces(rt2x00dev->hw, 216 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
211 rt2x00lib_beaconupdate_iter, 217 rt2x00lib_beaconupdate_iter,
212 rt2x00dev); 218 rt2x00dev);
213} 219}
214EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); 220EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
215 221
@@ -649,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
649 const int channel, const int tx_power, 655 const int channel, const int tx_power,
650 const int value) 656 const int value)
651{ 657{
652 entry->center_freq = ieee80211_channel_to_frequency(channel); 658 /* XXX: this assumption about the band is wrong for 802.11j */
659 entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
660 entry->center_freq = ieee80211_channel_to_frequency(channel,
661 entry->band);
653 entry->hw_value = value; 662 entry->hw_value = value;
654 entry->max_power = tx_power; 663 entry->max_power = tx_power;
655 entry->max_antenna_gain = 0xff; 664 entry->max_antenna_gain = 0xff;
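ieee80211_channel_to_frequency() now takes the band explicitly, so rt2x00lib_channel() derives it from the channel number first (1-14 -> 2.4 GHz, everything else treated as 5 GHz, with the 802.11j caveat called out in the new comment). The standard mapping behind that call, shown as a standalone sketch rather than the mac80211 helper itself:

#include <stdio.h>

/* 2.4 GHz: 5 MHz spacing from 2407 MHz; channel 14 is the 2484 MHz outlier. */
static int channel_to_freq_mhz(int chan)
{
        if (chan <= 14)
                return chan == 14 ? 2484 : 2407 + chan * 5;

        /* Treated as 5 GHz: 5 MHz spacing from 5000 MHz. */
        return 5000 + chan * 5;
}

int main(void)
{
        printf("ch  1 -> %d MHz\n", channel_to_freq_mhz(1));    /* 2412 */
        printf("ch 14 -> %d MHz\n", channel_to_freq_mhz(14));   /* 2484 */
        printf("ch 36 -> %d MHz\n", channel_to_freq_mhz(36));   /* 5180 */
        return 0;
}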
@@ -812,15 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
812 GFP_KERNEL); 821 GFP_KERNEL);
813 if (status) 822 if (status)
814 return status; 823 return status;
824 }
815 825
816 /* tasklet for processing the tx status reports. */ 826 /*
817 if (rt2x00dev->ops->lib->txstatus_tasklet) 827 * Initialize tasklets if used by the driver. Tasklets are
818 tasklet_init(&rt2x00dev->txstatus_tasklet, 828 * disabled until the interrupts are turned on. The driver
819 rt2x00dev->ops->lib->txstatus_tasklet, 829 * has to handle that.
820 (unsigned long)rt2x00dev); 830 */
821 831#define RT2X00_TASKLET_INIT(taskletname) \
832 if (rt2x00dev->ops->lib->taskletname) { \
833 tasklet_init(&rt2x00dev->taskletname, \
834 rt2x00dev->ops->lib->taskletname, \
835 (unsigned long)rt2x00dev); \
836 tasklet_disable(&rt2x00dev->taskletname); \
822 } 837 }
823 838
839 RT2X00_TASKLET_INIT(txstatus_tasklet);
840 RT2X00_TASKLET_INIT(pretbtt_tasklet);
841 RT2X00_TASKLET_INIT(tbtt_tasklet);
842 RT2X00_TASKLET_INIT(rxdone_tasklet);
843 RT2X00_TASKLET_INIT(autowake_tasklet);
844
845#undef RT2X00_TASKLET_INIT
846
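RT2X00_TASKLET_INIT pairs tasklet_init() with an immediate tasklet_disable(): every tasklet the driver registers starts out disabled and only becomes runnable when the IRQ-on path (or the beacon queue start, for the tbtt pair) calls tasklet_enable(), so a stray interrupt during probe can schedule work but never execute it against a half-initialised device. A condensed sketch of that lifecycle using the kernel tasklet API; the demo_* names are invented:

#include <linux/interrupt.h>

struct demo_dev {
        struct tasklet_struct rxdone_tasklet;
};

static void demo_rxdone(unsigned long data)
{
        /* bottom-half work */
}

static void demo_probe(struct demo_dev *dev)
{
        tasklet_init(&dev->rxdone_tasklet, demo_rxdone, (unsigned long)dev);
        /* Created disabled: a stray schedule before IRQ-on cannot run yet. */
        tasklet_disable(&dev->rxdone_tasklet);
}

static void demo_irq_on(struct demo_dev *dev)
{
        tasklet_enable(&dev->rxdone_tasklet);   /* pending work may run now */
}

static void demo_remove(struct demo_dev *dev)
{
        tasklet_kill(&dev->rxdone_tasklet);     /* as rt2x00lib_remove_dev does */
}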
824 /* 847 /*
825 * Register HW. 848 * Register HW.
826 */ 849 */
@@ -949,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
949{ 972{
950 int retval = -ENOMEM; 973 int retval = -ENOMEM;
951 974
975 spin_lock_init(&rt2x00dev->irqmask_lock);
952 mutex_init(&rt2x00dev->csr_mutex); 976 mutex_init(&rt2x00dev->csr_mutex);
953 977
954 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); 978 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -973,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
973 BIT(NL80211_IFTYPE_WDS); 997 BIT(NL80211_IFTYPE_WDS);
974 998
975 /* 999 /*
976 * Initialize configuration work. 1000 * Initialize work.
977 */ 1001 */
1002 rt2x00dev->workqueue =
1003 alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
1004 if (!rt2x00dev->workqueue) {
1005 retval = -ENOMEM;
1006 goto exit;
1007 }
1008
978 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1009 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
979 1010
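The driver now allocates its own ordered workqueue instead of relying on the mac80211 workqueue; as the new comment in rt2x00.h puts it, some of these work items have dependencies between each other that make the shared queue unsuitable, and the rt2x00usb changes further down queue their rxdone/txdone work here. An ordered queue also guarantees the items run one at a time, in submission order. A bare-bones sketch of the allocate/queue/destroy pattern with invented demo_* names; the workqueue calls themselves are the real API used in the hunk above:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct demo_dev {
        struct workqueue_struct *wq;
        struct work_struct rxdone_work;
};

static void demo_rxdone_work(struct work_struct *work)
{
        /* deferred rx processing in process context */
}

static int demo_init(struct demo_dev *dev)
{
        /* Ordered: at most one work item runs at a time, in queue order. */
        dev->wq = alloc_ordered_workqueue("demo_wq", 0);
        if (!dev->wq)
                return -ENOMEM;

        INIT_WORK(&dev->rxdone_work, demo_rxdone_work);
        return 0;
}

static void demo_rx_complete(struct demo_dev *dev)
{
        /* Defer to the private queue rather than the mac80211 workqueue. */
        queue_work(dev->wq, &dev->rxdone_work);
}

static void demo_exit(struct demo_dev *dev)
{
        cancel_work_sync(&dev->rxdone_work);
        destroy_workqueue(dev->wq);
}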
980 /* 1011 /*
@@ -1033,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1033 cancel_work_sync(&rt2x00dev->intf_work); 1064 cancel_work_sync(&rt2x00dev->intf_work);
1034 cancel_work_sync(&rt2x00dev->rxdone_work); 1065 cancel_work_sync(&rt2x00dev->rxdone_work);
1035 cancel_work_sync(&rt2x00dev->txdone_work); 1066 cancel_work_sync(&rt2x00dev->txdone_work);
1067 destroy_workqueue(rt2x00dev->workqueue);
1036 1068
1037 /* 1069 /*
1038 * Free the tx status fifo. 1070 * Free the tx status fifo.
@@ -1043,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1043 * Kill the tx status tasklet. 1075 * Kill the tx status tasklet.
1044 */ 1076 */
1045 tasklet_kill(&rt2x00dev->txstatus_tasklet); 1077 tasklet_kill(&rt2x00dev->txstatus_tasklet);
1078 tasklet_kill(&rt2x00dev->pretbtt_tasklet);
1079 tasklet_kill(&rt2x00dev->tbtt_tasklet);
1080 tasklet_kill(&rt2x00dev->rxdone_tasklet);
1081 tasklet_kill(&rt2x00dev->autowake_tasklet);
1046 1082
1047 /* 1083 /*
1048 * Uninitialize device. 1084 * Uninitialize device.
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index b7ad46ecaa1..03d9579da68 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -69,7 +69,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
69 txdesc->mcs |= 0x08; 69 txdesc->mcs |= 0x08;
70 } 70 }
71 71
72
73 /* 72 /*
74 * This frame is eligible for an AMPDU, however, don't aggregate 73 * This frame is eligible for an AMPDU, however, don't aggregate
75 * frames that are intended to probe a specific tx rate. 74 * frames that are intended to probe a specific tx rate.
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a105c500627..2d94cbaf5f4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -157,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
157 bool local); 157 bool local);
158 158
159/** 159/**
160 * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware 160 * rt2x00queue_update_beacon - Send new beacon from mac80211
161 * to hardware. Handles locking by itself (mutex).
161 * @rt2x00dev: Pointer to &struct rt2x00_dev. 162 * @rt2x00dev: Pointer to &struct rt2x00_dev.
162 * @vif: Interface for which the beacon should be updated. 163 * @vif: Interface for which the beacon should be updated.
163 * @enable_beacon: Enable beaconing
164 */ 164 */
165int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, 165int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
166 struct ieee80211_vif *vif, 166 struct ieee80211_vif *vif);
167 const bool enable_beacon); 167
168/**
169 * rt2x00queue_update_beacon_locked - Send new beacon from mac80211
170 * to hardware. Caller needs to ensure locking.
171 * @rt2x00dev: Pointer to &struct rt2x00_dev.
172 * @vif: Interface for which the beacon should be updated.
173 */
174int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
175 struct ieee80211_vif *vif);
176
177/**
178 * rt2x00queue_clear_beacon - Clear beacon in hardware
179 * @rt2x00dev: Pointer to &struct rt2x00_dev.
180 * @vif: Interface for which the beacon should be updated.
181 */
182int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
183 struct ieee80211_vif *vif);
168 184
169/** 185/**
170 * rt2x00queue_index_inc - Index incrementation function 186 * rt2x00queue_index_inc - Index incrementation function
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index bfda60eaf4e..c975b0a12e9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
417 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags)) 417 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
418 return; 418 return;
419 419
420 schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL); 420 ieee80211_queue_delayed_work(rt2x00dev->hw,
421 &link->watchdog_work, WATCHDOG_INTERVAL);
421} 422}
422 423
423void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev) 424void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work)
441 rt2x00dev->ops->lib->watchdog(rt2x00dev); 442 rt2x00dev->ops->lib->watchdog(rt2x00dev);
442 443
443 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 444 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
444 schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL); 445 ieee80211_queue_delayed_work(rt2x00dev->hw,
446 &link->watchdog_work,
447 WATCHDOG_INTERVAL);
445} 448}
446 449
447void rt2x00link_register(struct rt2x00_dev *rt2x00dev) 450void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f3da051df39..1b3edef9e3d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -139,9 +139,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
139 * either RTS or CTS-to-self frame and handles everything 139 * either RTS or CTS-to-self frame and handles everything
140 * inside the hardware. 140 * inside the hardware.
141 */ 141 */
142 if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | 142 if (!rt2x00dev->ops->hw->set_rts_threshold &&
143 IEEE80211_TX_RC_USE_CTS_PROTECT)) && 143 (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
144 !rt2x00dev->ops->hw->set_rts_threshold) { 144 IEEE80211_TX_RC_USE_CTS_PROTECT))) {
145 if (rt2x00queue_available(queue) <= 1) 145 if (rt2x00queue_available(queue) <= 1)
146 goto exit_fail; 146 goto exit_fail;
147 147
@@ -617,11 +617,47 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
617 bss_conf->bssid); 617 bss_conf->bssid);
618 618
619 /* 619 /*
620 * Update the beacon. 620 * Update the beacon. This is only required on USB devices. PCI
621 * devices fetch beacons periodically.
621 */ 622 */
622 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) 623 if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
623 rt2x00queue_update_beacon(rt2x00dev, vif, 624 rt2x00queue_update_beacon(rt2x00dev, vif);
624 bss_conf->enable_beacon); 625
626 /*
627 * Start/stop beaconing.
628 */
629 if (changes & BSS_CHANGED_BEACON_ENABLED) {
630 if (!bss_conf->enable_beacon && intf->enable_beacon) {
631 rt2x00queue_clear_beacon(rt2x00dev, vif);
632 rt2x00dev->intf_beaconing--;
633 intf->enable_beacon = false;
634
635 if (rt2x00dev->intf_beaconing == 0) {
636 /*
637 * Last beaconing interface disabled
638 * -> stop beacon queue.
639 */
640 mutex_lock(&intf->beacon_skb_mutex);
641 rt2x00queue_stop_queue(rt2x00dev->bcn);
642 mutex_unlock(&intf->beacon_skb_mutex);
643 }
644
645
646 } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
647 rt2x00dev->intf_beaconing++;
648 intf->enable_beacon = true;
649
650 if (rt2x00dev->intf_beaconing == 1) {
651 /*
652 * First beaconing interface enabled
653 * -> start beacon queue.
654 */
655 mutex_lock(&intf->beacon_skb_mutex);
656 rt2x00queue_start_queue(rt2x00dev->bcn);
657 mutex_unlock(&intf->beacon_skb_mutex);
658 }
659 }
660 }
625 661
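Beaconing is now reference-counted per device: intf_beaconing tracks how many interfaces currently beacon, the first enable starts the shared beacon queue, the last disable stops it, and intf->enable_beacon keeps the transitions edge-triggered per interface. The sketch below shows the same counting pattern in isolation with invented demo_* names; the locking is simplified compared to the driver, which only holds beacon_skb_mutex around the queue start/stop calls:

#include <linux/mutex.h>
#include <linux/types.h>

struct demo_dev {
        struct mutex beacon_mutex;
        unsigned int beaconing;         /* number of beaconing interfaces */
};

struct demo_intf {
        bool enable_beacon;
};

static void demo_start_beacon_queue(struct demo_dev *dev) { /* kick hardware */ }
static void demo_stop_beacon_queue(struct demo_dev *dev) { /* quiesce hardware */ }

static void demo_set_beaconing(struct demo_dev *dev, struct demo_intf *intf,
                               bool enable)
{
        if (enable == intf->enable_beacon)
                return;                 /* no state change for this interface */

        intf->enable_beacon = enable;

        mutex_lock(&dev->beacon_mutex);
        if (enable) {
                if (++dev->beaconing == 1)
                        demo_start_beacon_queue(dev);   /* first user */
        } else {
                if (--dev->beaconing == 0)
                        demo_stop_beacon_queue(dev);    /* last user */
        }
        mutex_unlock(&dev->beacon_mutex);
}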
626 /* 662 /*
627 * When the association status has changed we must reset the link 663 * When the association status has changed we must reset the link
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index ace0b668c04..4dd82b0b052 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -160,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
160 /* 160 /*
161 * Register interrupt handler. 161 * Register interrupt handler.
162 */ 162 */
163 status = request_threaded_irq(rt2x00dev->irq, 163 status = request_irq(rt2x00dev->irq,
164 rt2x00dev->ops->lib->irq_handler, 164 rt2x00dev->ops->lib->irq_handler,
165 rt2x00dev->ops->lib->irq_handler_thread, 165 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
166 IRQF_SHARED, rt2x00dev->name, rt2x00dev);
167 if (status) { 166 if (status) {
168 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", 167 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
169 rt2x00dev->irq, status); 168 rt2x00dev->irq, status);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index ca82b3a9169..bf9bba35628 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -365,13 +365,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
365 365
366 /* 366 /*
367 * Beacons and probe responses require the tsf timestamp 367 * Beacons and probe responses require the tsf timestamp
368 * to be inserted into the frame, except for a frame that has been injected 368 * to be inserted into the frame.
369 * through a monitor interface. This latter is needed for testing a
370 * monitor interface.
371 */ 369 */
372 if ((ieee80211_is_beacon(hdr->frame_control) || 370 if (ieee80211_is_beacon(hdr->frame_control) ||
373 ieee80211_is_probe_resp(hdr->frame_control)) && 371 ieee80211_is_probe_resp(hdr->frame_control))
374 (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
375 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); 372 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
376 373
377 /* 374 /*
@@ -566,13 +563,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
566 return 0; 563 return 0;
567} 564}
568 565
569int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, 566int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
570 struct ieee80211_vif *vif, 567 struct ieee80211_vif *vif)
571 const bool enable_beacon)
572{ 568{
573 struct rt2x00_intf *intf = vif_to_intf(vif); 569 struct rt2x00_intf *intf = vif_to_intf(vif);
574 struct skb_frame_desc *skbdesc;
575 struct txentry_desc txdesc;
576 570
577 if (unlikely(!intf->beacon)) 571 if (unlikely(!intf->beacon))
578 return -ENOBUFS; 572 return -ENOBUFS;
@@ -584,17 +578,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
584 */ 578 */
585 rt2x00queue_free_skb(intf->beacon); 579 rt2x00queue_free_skb(intf->beacon);
586 580
587 if (!enable_beacon) { 581 /*
588 rt2x00queue_stop_queue(intf->beacon->queue); 582 * Clear beacon (single bssid devices don't need to clear the beacon
589 mutex_unlock(&intf->beacon_skb_mutex); 583 * since the beacon queue will get stopped anyway).
590 return 0; 584 */
591 } 585 if (rt2x00dev->ops->lib->clear_beacon)
586 rt2x00dev->ops->lib->clear_beacon(intf->beacon);
587
588 mutex_unlock(&intf->beacon_skb_mutex);
589
590 return 0;
591}
592
593int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
594 struct ieee80211_vif *vif)
595{
596 struct rt2x00_intf *intf = vif_to_intf(vif);
597 struct skb_frame_desc *skbdesc;
598 struct txentry_desc txdesc;
599
600 if (unlikely(!intf->beacon))
601 return -ENOBUFS;
602
603 /*
604 * Clean up the beacon skb.
605 */
606 rt2x00queue_free_skb(intf->beacon);
592 607
593 intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); 608 intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
594 if (!intf->beacon->skb) { 609 if (!intf->beacon->skb)
595 mutex_unlock(&intf->beacon_skb_mutex);
596 return -ENOMEM; 610 return -ENOMEM;
597 }
598 611
599 /* 612 /*
600 * Copy all TX descriptor information into txdesc, 613 * Copy all TX descriptor information into txdesc,
@@ -611,13 +624,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
611 skbdesc->entry = intf->beacon; 624 skbdesc->entry = intf->beacon;
612 625
613 /* 626 /*
614 * Send beacon to hardware and enable beacon genaration.. 627 * Send beacon to hardware.
615 */ 628 */
616 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); 629 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
617 630
631 return 0;
632
633}
634
635int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
636 struct ieee80211_vif *vif)
637{
638 struct rt2x00_intf *intf = vif_to_intf(vif);
639 int ret;
640
641 mutex_lock(&intf->beacon_skb_mutex);
642 ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
618 mutex_unlock(&intf->beacon_skb_mutex); 643 mutex_unlock(&intf->beacon_skb_mutex);
619 644
620 return 0; 645 return ret;
621} 646}
622 647
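rt2x00queue_update_beacon() is split above in the familiar locked/unlocked style: the _locked variant assumes the caller already holds beacon_skb_mutex (the PCI pre-TBTT path calls it that way, as the rt2x00dev.c comment earlier in this patch explains), the plain wrapper takes the mutex itself for mac80211-driven updates, and rt2x00queue_clear_beacon() covers the disable half that used to live in the single function. A generic sketch of the wrapper-around-_locked pattern:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_beacon_mutex);

/* Caller must hold demo_beacon_mutex. */
static int demo_update_beacon_locked(void)
{
        /* rebuild and upload the beacon frame */
        return 0;
}

/* Convenience wrapper for callers that do not already hold the lock. */
static int demo_update_beacon(void)
{
        int ret;

        mutex_lock(&demo_beacon_mutex);
        ret = demo_update_beacon_locked();
        mutex_unlock(&demo_beacon_mutex);

        return ret;
}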
623void rt2x00queue_for_each_entry(struct data_queue *queue, 648void rt2x00queue_for_each_entry(struct data_queue *queue,
@@ -885,7 +910,7 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
885 * The queue flush has failed... 910 * The queue flush has failed...
886 */ 911 */
887 if (unlikely(!rt2x00queue_empty(queue))) 912 if (unlikely(!rt2x00queue_empty(queue)))
888 WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid); 913 WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
889 914
890 /* 915 /*
891 * Restore the queue to the previous status 916 * Restore the queue to the previous status
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index e8259ae48ce..6f867eec49c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -85,8 +85,6 @@ enum dev_state {
85 STATE_RADIO_OFF, 85 STATE_RADIO_OFF,
86 STATE_RADIO_IRQ_ON, 86 STATE_RADIO_IRQ_ON,
87 STATE_RADIO_IRQ_OFF, 87 STATE_RADIO_IRQ_OFF,
88 STATE_RADIO_IRQ_ON_ISR,
89 STATE_RADIO_IRQ_OFF_ISR,
90}; 88};
91 89
92/* 90/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1a9937d5aff..fbe735f5b35 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -227,7 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
227 * Schedule the delayed work for reading the TX status 227 * Schedule the delayed work for reading the TX status
228 * from the device. 228 * from the device.
229 */ 229 */
230 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work); 230 queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
231} 231}
232 232
233static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) 233static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -320,7 +320,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
320 * Schedule the delayed work for reading the RX status 320 * Schedule the delayed work for reading the RX status
321 * from the device. 321 * from the device.
322 */ 322 */
323 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work); 323 queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
324} 324}
325 325
326static void rt2x00usb_kick_rx_entry(struct queue_entry *entry) 326static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
@@ -429,7 +429,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue)
429 * Schedule the completion handler manually, when this 429 * Schedule the completion handler manually, when this
430 * worker function runs, it should cleanup the queue. 430 * worker function runs, it should cleanup the queue.
431 */ 431 */
432 ieee80211_queue_work(queue->rt2x00dev->hw, completion); 432 queue_work(queue->rt2x00dev->workqueue, completion);
433 433
434 /* 434 /*
435 * Wait for a little while to give the driver 435 * Wait for a little while to give the driver
@@ -453,7 +453,7 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
453 WARNING(queue->rt2x00dev, "TX queue %d status timed out," 453 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
454 " invoke forced tx handler\n", queue->qid); 454 " invoke forced tx handler\n", queue->qid);
455 455
456 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work); 456 queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
457} 457}
458 458
459void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) 459void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 8de44dd401e..927a4a3e0ee 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
551 struct rt2x00intf_conf *conf, 551 struct rt2x00intf_conf *conf,
552 const unsigned int flags) 552 const unsigned int flags)
553{ 553{
554 unsigned int beacon_base;
555 u32 reg; 554 u32 reg;
556 555
557 if (flags & CONFIG_UPDATE_TYPE) { 556 if (flags & CONFIG_UPDATE_TYPE) {
558 /* 557 /*
559 * Clear current synchronisation setup.
560 * For the Beacon base registers, we only need to clear
561 * the first byte since that byte contains the VALID and OWNER
562 * bits which (when set to 0) will invalidate the entire beacon.
563 */
564 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
565 rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
566
567 /*
568 * Enable synchronisation. 558 * Enable synchronisation.
569 */ 559 */
570 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 560 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
571 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
572 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); 561 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
573 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
574 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 562 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
575 } 563 }
576 564
@@ -1154,6 +1142,11 @@ static void rt61pci_start_queue(struct data_queue *queue)
1154 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 1142 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
1155 break; 1143 break;
1156 case QID_BEACON: 1144 case QID_BEACON:
1145 /*
1146 * Allow the tbtt tasklet to be scheduled.
1147 */
1148 tasklet_enable(&rt2x00dev->tbtt_tasklet);
1149
1157 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1150 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1158 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); 1151 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1159 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); 1152 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
@@ -1233,6 +1226,11 @@ static void rt61pci_stop_queue(struct data_queue *queue)
1233 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); 1226 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1234 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1227 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1235 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1228 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1229
1230 /*
1231 * Wait for possibly running tbtt tasklets.
1232 */
1233 tasklet_disable(&rt2x00dev->tbtt_tasklet);
1236 break; 1234 break;
1237 default: 1235 default:
1238 break; 1236 break;
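[editor's note] The start_queue/stop_queue hunks above bracket the beacon queue with tasklet_enable()/tasklet_disable(), so a TBTT tasklet can neither be scheduled while the queue is stopped nor still be running when stop_queue returns. A sketch of that pairing with the old-style tasklet API this driver uses; the names are placeholders.

#include <linux/interrupt.h>

static void my_tbtt_handler(unsigned long data)
{
	/* runs on the TBTT interrupt; 'data' would point at the device */
}

/* declared disabled: the count starts at 1, so enable/disable stay balanced */
static DECLARE_TASKLET_DISABLED(my_tbtt_tasklet, my_tbtt_handler, 0);

static void my_start_beacon_queue(void)
{
	tasklet_enable(&my_tbtt_tasklet);	/* allow scheduling again */
	/* ...program TSF/TBTT generation in hardware... */
}

static void my_stop_beacon_queue(void)
{
	/* ...stop beacon generation in hardware... */
	tasklet_disable(&my_tbtt_tasklet);	/* waits for a running handler */
}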
@@ -1719,9 +1717,9 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
1719static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1717static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1720 enum dev_state state) 1718 enum dev_state state)
1721{ 1719{
1722 int mask = (state == STATE_RADIO_IRQ_OFF) || 1720 int mask = (state == STATE_RADIO_IRQ_OFF);
1723 (state == STATE_RADIO_IRQ_OFF_ISR);
1724 u32 reg; 1721 u32 reg;
1722 unsigned long flags;
1725 1723
1726 /* 1724 /*
1727 * When interrupts are being enabled, the interrupt registers 1725 * When interrupts are being enabled, the interrupt registers
@@ -1733,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1733 1731
1734 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg); 1732 rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
1735 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); 1733 rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
1734
1735 /*
1736 * Enable tasklets.
1737 */
1738 tasklet_enable(&rt2x00dev->txstatus_tasklet);
1739 tasklet_enable(&rt2x00dev->rxdone_tasklet);
1740 tasklet_enable(&rt2x00dev->autowake_tasklet);
1736 } 1741 }
1737 1742
1738 /* 1743 /*
1739 * Only toggle the interrupts bits we are going to use. 1744 * Only toggle the interrupts bits we are going to use.
1740 * Non-checked interrupt bits are disabled by default. 1745 * Non-checked interrupt bits are disabled by default.
1741 */ 1746 */
1747 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
1748
1742 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 1749 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1743 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask); 1750 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
1744 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask); 1751 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
@@ -1758,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1758 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask); 1765 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
1759 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask); 1766 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
1760 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 1767 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
1768
1769 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
1770
1771 if (state == STATE_RADIO_IRQ_OFF) {
1772 /*
1773 * Ensure that all tasklets are finished.
1774 */
1775 tasklet_disable(&rt2x00dev->txstatus_tasklet);
1776 tasklet_disable(&rt2x00dev->rxdone_tasklet);
1777 tasklet_disable(&rt2x00dev->autowake_tasklet);
1778 }
1761} 1779}
1762 1780
1763static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev) 1781static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
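[editor's note] The toggle_irq hunk above starts protecting the INT_MASK_CSR/MCU_INT_MASK_CSR read-modify-write with irqmask_lock, since the interrupt handler now updates the same registers. A sketch of that locked update; the 0-means-enabled convention is taken from the hunk, the offset and names are placeholders.

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_INT_MASK_CSR	0x346c		/* placeholder offset */

struct my_irq_dev {
	void __iomem *regs;
	spinlock_t irqmask_lock;	/* serialises mask updates vs. the ISR */
};

static void my_unmask_irq_bit(struct my_irq_dev *dev, u32 bit)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&dev->irqmask_lock, flags);
	reg = readl(dev->regs + MY_INT_MASK_CSR);
	reg &= ~bit;		/* 0 = interrupt enabled on this hardware */
	writel(reg, dev->regs + MY_INT_MASK_CSR);
	spin_unlock_irqrestore(&dev->irqmask_lock, flags);
}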
@@ -1833,9 +1851,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1833 rt61pci_disable_radio(rt2x00dev); 1851 rt61pci_disable_radio(rt2x00dev);
1834 break; 1852 break;
1835 case STATE_RADIO_IRQ_ON: 1853 case STATE_RADIO_IRQ_ON:
1836 case STATE_RADIO_IRQ_ON_ISR:
1837 case STATE_RADIO_IRQ_OFF: 1854 case STATE_RADIO_IRQ_OFF:
1838 case STATE_RADIO_IRQ_OFF_ISR:
1839 rt61pci_toggle_irq(rt2x00dev, state); 1855 rt61pci_toggle_irq(rt2x00dev, state);
1840 break; 1856 break;
1841 case STATE_DEEP_SLEEP: 1857 case STATE_DEEP_SLEEP:
@@ -1962,13 +1978,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1962 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1978 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1963 unsigned int beacon_base; 1979 unsigned int beacon_base;
1964 unsigned int padding_len; 1980 unsigned int padding_len;
1965 u32 reg; 1981 u32 orig_reg, reg;
1966 1982
1967 /* 1983 /*
1968 * Disable beaconing while we are reloading the beacon data, 1984 * Disable beaconing while we are reloading the beacon data,
1969 * otherwise we might be sending out invalid data. 1985 * otherwise we might be sending out invalid data.
1970 */ 1986 */
1971 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1987 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1988 orig_reg = reg;
1972 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1989 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1973 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1990 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1974 1991
@@ -1986,7 +2003,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1986 * Write entire beacon with descriptor and padding to register. 2003 * Write entire beacon with descriptor and padding to register.
1987 */ 2004 */
1988 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 2005 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1989 skb_pad(entry->skb, padding_len); 2006 if (padding_len && skb_pad(entry->skb, padding_len)) {
2007 ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
2008 /* skb freed by skb_pad() on failure */
2009 entry->skb = NULL;
2010 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
2011 return;
2012 }
2013
1990 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 2014 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1991 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2015 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
1992 entry_priv->desc, TXINFO_SIZE); 2016 entry_priv->desc, TXINFO_SIZE);
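[editor's note] The write_beacon hunk above stops ignoring the return value of skb_pad(). skb_pad() frees the skb when it cannot extend it, so on failure the driver must forget its own pointer and restore the saved TXRX_CSR9 value instead of touching the buffer again. A minimal sketch of that check (my_pad_frame is illustrative):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

static int my_pad_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	unsigned int padding_len = roundup(skb->len, 4) - skb->len;

	if (padding_len && skb_pad(skb, padding_len)) {
		/* skb has already been freed by skb_pad() */
		*pskb = NULL;
		return -ENOMEM;
	}
	return 0;
}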
@@ -2002,8 +2026,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
2002 */ 2026 */
2003 rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); 2027 rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
2004 2028
2005 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
2006 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
2007 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 2029 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
2008 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 2030 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2009 2031
@@ -2014,6 +2036,32 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
2014 entry->skb = NULL; 2036 entry->skb = NULL;
2015} 2037}
2016 2038
2039static void rt61pci_clear_beacon(struct queue_entry *entry)
2040{
2041 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
2042 u32 reg;
2043
2044 /*
2045 * Disable beaconing while we are reloading the beacon data,
2046 * otherwise we might be sending out invalid data.
2047 */
2048 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
2049 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
2050 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2051
2052 /*
2053 * Clear beacon.
2054 */
2055 rt2x00pci_register_write(rt2x00dev,
2056 HW_BEACON_OFFSET(entry->entry_idx), 0);
2057
2058 /*
2059 * Enable beaconing again.
2060 */
2061 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
2062 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2063}
2064
2017/* 2065/*
2018 * RX control handlers 2066 * RX control handlers
2019 */ 2067 */
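[editor's note] rt61pci_clear_beacon above invalidates a beacon without rewriting it: beacon generation is paused, the first word of the entry's beacon base is zeroed (which clears the VALID and OWNER bits mentioned in the removed config_intf comment), and generation is resumed. A compressed sketch of that sequence; the offsets and bit value are placeholders, not the real rt61pci register map.

#include <linux/io.h>
#include <linux/types.h>

#define MY_TXRX_CSR9		0x3060			/* placeholder */
#define MY_BEACON_GEN		(1U << 19)		/* placeholder bit */
#define MY_BEACON_BASE(idx)	(0x2c00 + (idx) * 0x100)	/* placeholder */

struct my_beacon_dev {
	void __iomem *regs;
};

static void my_clear_beacon(struct my_beacon_dev *dev, unsigned int entry_idx)
{
	u32 reg = readl(dev->regs + MY_TXRX_CSR9);

	writel(reg & ~MY_BEACON_GEN, dev->regs + MY_TXRX_CSR9);	/* pause */
	writel(0, dev->regs + MY_BEACON_BASE(entry_idx));		/* invalidate */
	writel(reg | MY_BEACON_GEN, dev->regs + MY_TXRX_CSR9);	/* resume */
}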
@@ -2078,9 +2126,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
2078 rxdesc->flags |= RX_FLAG_IV_STRIPPED; 2126 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
2079 2127
2080 /* 2128 /*
2081 * FIXME: Legacy driver indicates that the frame does 2129 * The hardware has already checked the Michael Mic and has
2082 * contain the Michael Mic. Unfortunately, in rt2x00 2130 * stripped it from the frame. Signal this to mac80211.
2083 * the MIC seems to be missing completely...
2084 */ 2131 */
2085 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; 2132 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
2086 2133
@@ -2211,61 +2258,80 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2211 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 2258 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2212} 2259}
2213 2260
2214static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance) 2261static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
2262 struct rt2x00_field32 irq_field)
2215{ 2263{
2216 struct rt2x00_dev *rt2x00dev = dev_instance; 2264 unsigned long flags;
2217 u32 reg = rt2x00dev->irqvalue[0]; 2265 u32 reg;
2218 u32 reg_mcu = rt2x00dev->irqvalue[1];
2219 2266
2220 /* 2267 /*
2221 * Handle interrupts, walk through all bits 2268 * Enable a single interrupt. The interrupt mask register
2222 * and run the tasks, the bits are checked in order of 2269 * access needs locking.
2223 * priority.
2224 */ 2270 */
2271 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
2225 2272
2226 /* 2273 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2227 * 1 - Rx ring done interrupt. 2274 rt2x00_set_field32(&reg, irq_field, 0);
2228 */ 2275 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
2229 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
2230 rt2x00pci_rxdone(rt2x00dev);
2231 2276
2232 /* 2277 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
2233 * 2 - Tx ring done interrupt. 2278}
2234 */
2235 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
2236 rt61pci_txdone(rt2x00dev);
2237 2279
2238 /* 2280static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
2239 * 3 - Handle MCU command done. 2281 struct rt2x00_field32 irq_field)
2240 */ 2282{
2241 if (reg_mcu) 2283 unsigned long flags;
2242 rt2x00pci_register_write(rt2x00dev, 2284 u32 reg;
2243 M2H_CMD_DONE_CSR, 0xffffffff);
2244 2285
2245 /* 2286 /*
2246 * 4 - MCU Autowakeup interrupt. 2287 * Enable a single MCU interrupt. The interrupt mask register
2288 * access needs locking.
2247 */ 2289 */
2248 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP)) 2290 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
2249 rt61pci_wakeup(rt2x00dev);
2250 2291
2251 /* 2292 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
2252 * 5 - Beacon done interrupt. 2293 rt2x00_set_field32(&reg, irq_field, 0);
2253 */ 2294 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
2254 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
2255 rt2x00lib_beacondone(rt2x00dev);
2256 2295
2257 /* Enable interrupts again. */ 2296 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
2258 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 2297}
2259 STATE_RADIO_IRQ_ON_ISR); 2298
2260 return IRQ_HANDLED; 2299static void rt61pci_txstatus_tasklet(unsigned long data)
2300{
2301 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2302 rt61pci_txdone(rt2x00dev);
2303 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
2304}
2305
2306static void rt61pci_tbtt_tasklet(unsigned long data)
2307{
2308 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2309 rt2x00lib_beacondone(rt2x00dev);
2310 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
2311}
2312
2313static void rt61pci_rxdone_tasklet(unsigned long data)
2314{
2315 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2316 rt2x00pci_rxdone(rt2x00dev);
2317 rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
2261} 2318}
2262 2319
2320static void rt61pci_autowake_tasklet(unsigned long data)
2321{
2322 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
2323 rt61pci_wakeup(rt2x00dev);
2324 rt2x00pci_register_write(rt2x00dev,
2325 M2H_CMD_DONE_CSR, 0xffffffff);
2326 rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
2327}
2263 2328
2264static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) 2329static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2265{ 2330{
2266 struct rt2x00_dev *rt2x00dev = dev_instance; 2331 struct rt2x00_dev *rt2x00dev = dev_instance;
2267 u32 reg_mcu; 2332 u32 reg_mcu, mask_mcu;
2268 u32 reg; 2333 u32 reg, mask;
2334 unsigned long flags;
2269 2335
2270 /* 2336 /*
2271 * Get the interrupt sources & saved to local variable. 2337 * Get the interrupt sources & saved to local variable.
@@ -2283,14 +2349,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2283 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 2349 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
2284 return IRQ_HANDLED; 2350 return IRQ_HANDLED;
2285 2351
2286 /* Store irqvalues for use in the interrupt thread. */ 2352 /*
2287 rt2x00dev->irqvalue[0] = reg; 2353 * Schedule tasklets for interrupt handling.
2288 rt2x00dev->irqvalue[1] = reg_mcu; 2354 */
2355 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
2356 tasklet_schedule(&rt2x00dev->rxdone_tasklet);
2357
2358 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
2359 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
2360
2361 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
2362 tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
2363
2364 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
2365 tasklet_schedule(&rt2x00dev->autowake_tasklet);
2366
2367 /*
2368 * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
2369 * for interrupts and interrupt masks we can just use the value of
2370 * INT_SOURCE_CSR to create the interrupt mask.
2371 */
2372 mask = reg;
2373 mask_mcu = reg_mcu;
2289 2374
2290 /* Disable interrupts, will be enabled again in the interrupt thread. */ 2375 /*
2291 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 2376 * Disable all interrupts for which a tasklet was scheduled right now,
2292 STATE_RADIO_IRQ_OFF_ISR); 2377 * the tasklet will reenable the appropriate interrupts.
2293 return IRQ_WAKE_THREAD; 2378 */
2379 spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
2380
2381 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2382 reg |= mask;
2383 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
2384
2385 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
2386 reg |= mask_mcu;
2387 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
2388
2389 spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
2390
2391 return IRQ_HANDLED;
2294} 2392}
2295 2393
2296/* 2394/*
@@ -2884,7 +2982,10 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2884 2982
2885static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { 2983static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2886 .irq_handler = rt61pci_interrupt, 2984 .irq_handler = rt61pci_interrupt,
2887 .irq_handler_thread = rt61pci_interrupt_thread, 2985 .txstatus_tasklet = rt61pci_txstatus_tasklet,
2986 .tbtt_tasklet = rt61pci_tbtt_tasklet,
2987 .rxdone_tasklet = rt61pci_rxdone_tasklet,
2988 .autowake_tasklet = rt61pci_autowake_tasklet,
2888 .probe_hw = rt61pci_probe_hw, 2989 .probe_hw = rt61pci_probe_hw,
2889 .get_firmware_name = rt61pci_get_firmware_name, 2990 .get_firmware_name = rt61pci_get_firmware_name,
2890 .check_firmware = rt61pci_check_firmware, 2991 .check_firmware = rt61pci_check_firmware,
@@ -2903,6 +3004,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2903 .stop_queue = rt61pci_stop_queue, 3004 .stop_queue = rt61pci_stop_queue,
2904 .write_tx_desc = rt61pci_write_tx_desc, 3005 .write_tx_desc = rt61pci_write_tx_desc,
2905 .write_beacon = rt61pci_write_beacon, 3006 .write_beacon = rt61pci_write_beacon,
3007 .clear_beacon = rt61pci_clear_beacon,
2906 .fill_rxdone = rt61pci_fill_rxdone, 3008 .fill_rxdone = rt61pci_fill_rxdone,
2907 .config_shared_key = rt61pci_config_shared_key, 3009 .config_shared_key = rt61pci_config_shared_key,
2908 .config_pairwise_key = rt61pci_config_pairwise_key, 3010 .config_pairwise_key = rt61pci_config_pairwise_key,
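[editor's note] The largest change in this file replaces the threaded interrupt handler with per-source tasklets: the hard IRQ acknowledges the sources, schedules the matching tasklets and masks exactly those sources; each tasklet then does the work and unmasks its interrupt again through the locked helpers. A self-contained sketch of that scheme with a single RX source; all names and offsets are placeholders, not rt2x00 symbols.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_INT_SOURCE_CSR	0x3468		/* placeholder offsets */
#define MY_INT_MASK_CSR		0x346c
#define MY_INT_RXDONE		(1U << 0)

struct my_pci_dev {
	void __iomem *regs;
	spinlock_t irqmask_lock;
	struct tasklet_struct rxdone_tasklet;	/* tasklet_init()ed at probe */
};

static void my_rxdone_tasklet(unsigned long data)
{
	struct my_pci_dev *dev = (struct my_pci_dev *)data;
	unsigned long flags;
	u32 mask;

	/* ...process completed RX descriptors... */

	/* work done: unmask RX again (0 = enabled on this hardware) */
	spin_lock_irqsave(&dev->irqmask_lock, flags);
	mask = readl(dev->regs + MY_INT_MASK_CSR);
	writel(mask & ~MY_INT_RXDONE, dev->regs + MY_INT_MASK_CSR);
	spin_unlock_irqrestore(&dev->irqmask_lock, flags);
}

static irqreturn_t my_interrupt(int irq, void *dev_instance)
{
	struct my_pci_dev *dev = dev_instance;
	unsigned long flags;
	u32 reg, mask;

	/* read and acknowledge the pending sources */
	reg = readl(dev->regs + MY_INT_SOURCE_CSR);
	writel(reg, dev->regs + MY_INT_SOURCE_CSR);
	if (!reg)
		return IRQ_NONE;

	if (reg & MY_INT_RXDONE)
		tasklet_schedule(&dev->rxdone_tasklet);

	/* mask what was just scheduled; the tasklet unmasks it when done */
	spin_lock_irqsave(&dev->irqmask_lock, flags);
	mask = readl(dev->regs + MY_INT_MASK_CSR);
	writel(mask | reg, dev->regs + MY_INT_MASK_CSR);
	spin_unlock_irqrestore(&dev->irqmask_lock, flags);

	return IRQ_HANDLED;
}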
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 029be3c6c03..6e9981a1dd7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
502 struct rt2x00intf_conf *conf, 502 struct rt2x00intf_conf *conf,
503 const unsigned int flags) 503 const unsigned int flags)
504{ 504{
505 unsigned int beacon_base;
506 u32 reg; 505 u32 reg;
507 506
508 if (flags & CONFIG_UPDATE_TYPE) { 507 if (flags & CONFIG_UPDATE_TYPE) {
509 /* 508 /*
510 * Clear current synchronisation setup.
511 * For the Beacon base registers we only need to clear
512 * the first byte since that byte contains the VALID and OWNER
513 * bits which (when set to 0) will invalidate the entire beacon.
514 */
515 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
516 rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
517
518 /*
519 * Enable synchronisation. 509 * Enable synchronisation.
520 */ 510 */
521 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 511 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
522 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
523 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); 512 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
524 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
525 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 513 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
526 } 514 }
527 515
@@ -1440,9 +1428,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1440 rt73usb_disable_radio(rt2x00dev); 1428 rt73usb_disable_radio(rt2x00dev);
1441 break; 1429 break;
1442 case STATE_RADIO_IRQ_ON: 1430 case STATE_RADIO_IRQ_ON:
1443 case STATE_RADIO_IRQ_ON_ISR:
1444 case STATE_RADIO_IRQ_OFF: 1431 case STATE_RADIO_IRQ_OFF:
1445 case STATE_RADIO_IRQ_OFF_ISR:
1446 /* No support, but no error either */ 1432 /* No support, but no error either */
1447 break; 1433 break;
1448 case STATE_DEEP_SLEEP: 1434 case STATE_DEEP_SLEEP:
@@ -1547,13 +1533,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1547 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1533 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1548 unsigned int beacon_base; 1534 unsigned int beacon_base;
1549 unsigned int padding_len; 1535 unsigned int padding_len;
1550 u32 reg; 1536 u32 orig_reg, reg;
1551 1537
1552 /* 1538 /*
1553 * Disable beaconing while we are reloading the beacon data, 1539 * Disable beaconing while we are reloading the beacon data,
1554 * otherwise we might be sending out invalid data. 1540 * otherwise we might be sending out invalid data.
1555 */ 1541 */
1556 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 1542 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1543 orig_reg = reg;
1557 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1544 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1558 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1545 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1559 1546
@@ -1577,7 +1564,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1577 * Write entire beacon with descriptor and padding to register. 1564 * Write entire beacon with descriptor and padding to register.
1578 */ 1565 */
1579 padding_len = roundup(entry->skb->len, 4) - entry->skb->len; 1566 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1580 skb_pad(entry->skb, padding_len); 1567 if (padding_len && skb_pad(entry->skb, padding_len)) {
1568 ERROR(rt2x00dev, "Failure padding beacon, aborting\n");
1569 /* skb freed by skb_pad() on failure */
1570 entry->skb = NULL;
1571 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg);
1572 return;
1573 }
1574
1581 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1575 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1582 rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, 1576 rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
1583 entry->skb->len + padding_len); 1577 entry->skb->len + padding_len);
@@ -1590,8 +1584,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1590 */ 1584 */
1591 rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); 1585 rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
1592 1586
1593 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1594 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1595 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 1587 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1596 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1588 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1597 1589
@@ -1602,6 +1594,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1602 entry->skb = NULL; 1594 entry->skb = NULL;
1603} 1595}
1604 1596
1597static void rt73usb_clear_beacon(struct queue_entry *entry)
1598{
1599 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1600 unsigned int beacon_base;
1601 u32 reg;
1602
1603 /*
1604 * Disable beaconing while we are reloading the beacon data,
1605 * otherwise we might be sending out invalid data.
1606 */
1607 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1608 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1609 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1610
1611 /*
1612 * Clear beacon.
1613 */
1614 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1615 rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
1616
1617 /*
1618 * Enable beaconing again.
1619 */
1620 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1621 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1622}
1623
1605static int rt73usb_get_tx_data_len(struct queue_entry *entry) 1624static int rt73usb_get_tx_data_len(struct queue_entry *entry)
1606{ 1625{
1607 int length; 1626 int length;
@@ -1698,9 +1717,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1698 rxdesc->flags |= RX_FLAG_IV_STRIPPED; 1717 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1699 1718
1700 /* 1719 /*
1701 * FIXME: Legacy driver indicates that the frame does 1720 * The hardware has already checked the Michael Mic and has
1702 * contain the Michael Mic. Unfortunately, in rt2x00 1721 * stripped it from the frame. Signal this to mac80211.
1703 * the MIC seems to be missing completely...
1704 */ 1722 */
1705 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; 1723 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1706 1724
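[editor's note] The comment rewrite above records that the hardware has already verified and stripped the Michael MIC, which is reported to mac80211 through the RX status flags. A small sketch of how such flags are typically set (my_fill_rx_status is illustrative, not the rt73usb code path):

#include <net/mac80211.h>

static void my_fill_rx_status(struct ieee80211_rx_status *rx_status,
			      bool hw_decrypted, bool mic_stripped)
{
	if (!hw_decrypted)
		return;

	rx_status->flag |= RX_FLAG_DECRYPTED;	/* cipher handled in hardware */
	rx_status->flag |= RX_FLAG_IV_STRIPPED;	/* IV/ICV already removed */
	if (mic_stripped)
		rx_status->flag |= RX_FLAG_MMIC_STRIPPED;	/* Michael MIC removed */
}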
@@ -2313,6 +2331,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2313 .flush_queue = rt2x00usb_flush_queue, 2331 .flush_queue = rt2x00usb_flush_queue,
2314 .write_tx_desc = rt73usb_write_tx_desc, 2332 .write_tx_desc = rt73usb_write_tx_desc,
2315 .write_beacon = rt73usb_write_beacon, 2333 .write_beacon = rt73usb_write_beacon,
2334 .clear_beacon = rt73usb_clear_beacon,
2316 .get_tx_data_len = rt73usb_get_tx_data_len, 2335 .get_tx_data_len = rt73usb_get_tx_data_len,
2317 .fill_rxdone = rt73usb_fill_rxdone, 2336 .fill_rxdone = rt73usb_fill_rxdone,
2318 .config_shared_key = rt73usb_config_shared_key, 2337 .config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7f6573f7f47..86f8d4d6403 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,6 +1,6 @@
1config RTL8192CE 1config RTL8192CE
2 tristate "Realtek RTL8192CE/RTL8188SE Wireless Network Adapter" 2 tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
3 depends on MAC80211 && EXPERIMENTAL 3 depends on MAC80211 && PCI && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 select RTLWIFI 5 select RTLWIFI
6 ---help--- 6 ---help---
@@ -9,7 +9,18 @@ config RTL8192CE
9 9
10 If you choose to build it as a module, it will be called rtl8192ce 10 If you choose to build it as a module, it will be called rtl8192ce
11 11
12config RTL8192CU
13 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
14 depends on MAC80211 && USB && EXPERIMENTAL
15 select FW_LOADER
16 select RTLWIFI
17 ---help---
18 This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
19 wireless network adapters.
20
21 If you choose to build it as a module, it will be called rtl8192cu
22
12config RTLWIFI 23config RTLWIFI
13 tristate 24 tristate
14 depends on RTL8192CE 25 depends on RTL8192CE || RTL8192CU
15 default m 26 default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index 2a7a4384f8e..c3e83a1da33 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -8,6 +8,10 @@ rtlwifi-objs := \
8 pci.o \ 8 pci.o \
9 ps.o \ 9 ps.o \
10 rc.o \ 10 rc.o \
11 regd.o 11 regd.o \
12 usb.o
12 13
13obj-$(CONFIG_RTL8192CE) += rtl8192ce/ 14obj-$(CONFIG_RTL8192CE) += rtl8192ce/
15obj-$(CONFIG_RTL8192CU) += rtl8192cu/
16
17ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index cf0b73e51fc..3f40dc2b129 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -144,7 +144,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
144 ht_cap->mcs.rx_mask[1] = 0xFF; 144 ht_cap->mcs.rx_mask[1] = 0xFF;
145 ht_cap->mcs.rx_mask[4] = 0x01; 145 ht_cap->mcs.rx_mask[4] = 0x01;
146 146
147 ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15; 147 ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
148 } else if (get_rf_type(rtlphy) == RF_1T1R) { 148 } else if (get_rf_type(rtlphy) == RF_1T1R) {
149 149
150 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n")); 150 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
@@ -153,7 +153,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
153 ht_cap->mcs.rx_mask[1] = 0x00; 153 ht_cap->mcs.rx_mask[1] = 0x00;
154 ht_cap->mcs.rx_mask[4] = 0x01; 154 ht_cap->mcs.rx_mask[4] = 0x01;
155 155
156 ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7; 156 ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7);
157 } 157 }
158} 158}
159 159
@@ -399,21 +399,21 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
399 u8 rate_flag = info->control.rates[0].flags; 399 u8 rate_flag = info->control.rates[0].flags;
400 400
401 /* Common Settings */ 401 /* Common Settings */
402 tcb_desc->b_rts_stbc = false; 402 tcb_desc->rts_stbc = false;
403 tcb_desc->b_cts_enable = false; 403 tcb_desc->cts_enable = false;
404 tcb_desc->rts_sc = 0; 404 tcb_desc->rts_sc = 0;
405 tcb_desc->b_rts_bw = false; 405 tcb_desc->rts_bw = false;
406 tcb_desc->b_rts_use_shortpreamble = false; 406 tcb_desc->rts_use_shortpreamble = false;
407 tcb_desc->b_rts_use_shortgi = false; 407 tcb_desc->rts_use_shortgi = false;
408 408
409 if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) { 409 if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
410 /* Use CTS-to-SELF in protection mode. */ 410 /* Use CTS-to-SELF in protection mode. */
411 tcb_desc->b_rts_enable = true; 411 tcb_desc->rts_enable = true;
412 tcb_desc->b_cts_enable = true; 412 tcb_desc->cts_enable = true;
413 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; 413 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
414 } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { 414 } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
415 /* Use RTS-CTS in protection mode. */ 415 /* Use RTS-CTS in protection mode. */
416 tcb_desc->b_rts_enable = true; 416 tcb_desc->rts_enable = true;
417 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; 417 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
418 } 418 }
419 419
@@ -429,7 +429,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
429 if (mac->opmode == NL80211_IFTYPE_STATION) 429 if (mac->opmode == NL80211_IFTYPE_STATION)
430 tcb_desc->ratr_index = 0; 430 tcb_desc->ratr_index = 0;
431 else if (mac->opmode == NL80211_IFTYPE_ADHOC) { 431 else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
432 if (tcb_desc->b_multicast || tcb_desc->b_broadcast) { 432 if (tcb_desc->multicast || tcb_desc->broadcast) {
433 tcb_desc->hw_rate = 433 tcb_desc->hw_rate =
434 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; 434 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
435 tcb_desc->use_driver_rate = 1; 435 tcb_desc->use_driver_rate = 1;
@@ -439,7 +439,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
439 } 439 }
440 } 440 }
441 441
442 if (rtlpriv->dm.b_useramask) { 442 if (rtlpriv->dm.useramask) {
443 /* TODO we will differentiate adhoc and station futrue */ 443 /* TODO we will differentiate adhoc and station futrue */
444 tcb_desc->mac_id = 0; 444 tcb_desc->mac_id = 0;
445 445
@@ -461,19 +461,19 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
461 struct rtl_priv *rtlpriv = rtl_priv(hw); 461 struct rtl_priv *rtlpriv = rtl_priv(hw);
462 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 462 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
463 463
464 tcb_desc->b_packet_bw = false; 464 tcb_desc->packet_bw = false;
465 465
466 if (!mac->bw_40 || !mac->ht_enable) 466 if (!mac->bw_40 || !mac->ht_enable)
467 return; 467 return;
468 468
469 if (tcb_desc->b_multicast || tcb_desc->b_broadcast) 469 if (tcb_desc->multicast || tcb_desc->broadcast)
470 return; 470 return;
471 471
472 /*use legency rate, shall use 20MHz */ 472 /*use legency rate, shall use 20MHz */
473 if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]) 473 if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
474 return; 474 return;
475 475
476 tcb_desc->b_packet_bw = true; 476 tcb_desc->packet_bw = true;
477} 477}
478 478
479static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw) 479static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
@@ -498,7 +498,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
498 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); 498 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
499 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 499 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
500 struct ieee80211_rate *txrate; 500 struct ieee80211_rate *txrate;
501 u16 fc = le16_to_cpu(hdr->frame_control); 501 __le16 fc = hdr->frame_control;
502 502
503 memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 503 memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc));
504 504
@@ -545,9 +545,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
545 } 545 }
546 546
547 if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) 547 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
548 tcb_desc->b_multicast = 1; 548 tcb_desc->multicast = 1;
549 else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) 549 else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
550 tcb_desc->b_broadcast = 1; 550 tcb_desc->broadcast = 1;
551 551
552 _rtl_txrate_selectmode(hw, tcb_desc); 552 _rtl_txrate_selectmode(hw, tcb_desc);
553 _rtl_query_bandwidth_mode(hw, tcb_desc); 553 _rtl_query_bandwidth_mode(hw, tcb_desc);
@@ -570,7 +570,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
570 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 570 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
571 struct rtl_priv *rtlpriv = rtl_priv(hw); 571 struct rtl_priv *rtlpriv = rtl_priv(hw);
572 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 572 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
573 u16 fc = le16_to_cpu(hdr->frame_control); 573 __le16 fc = hdr->frame_control;
574 574
575 if (ieee80211_is_auth(fc)) { 575 if (ieee80211_is_auth(fc)) {
576 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); 576 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
@@ -587,7 +587,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
587 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 587 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
588 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 588 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
589 struct rtl_priv *rtlpriv = rtl_priv(hw); 589 struct rtl_priv *rtlpriv = rtl_priv(hw);
590 u16 fc = le16_to_cpu(hdr->frame_control); 590 __le16 fc = hdr->frame_control;
591 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN)); 591 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
592 u8 category; 592 u8 category;
593 593
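[editor's note] Several hunks in this file change fc from a host-order u16 to the raw __le16 frame_control, because the mac80211 helpers operate on the little-endian value directly. A small sketch of the resulting pattern (my_is_auth_frame is illustrative):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

static bool my_is_auth_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;

	/* ieee80211_is_auth() takes the __le16 value, no le16_to_cpu() needed */
	return ieee80211_is_auth(fc);
}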
@@ -632,7 +632,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
632 struct rtl_priv *rtlpriv = rtl_priv(hw); 632 struct rtl_priv *rtlpriv = rtl_priv(hw);
633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
634 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 634 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
635 u16 fc = le16_to_cpu(hdr->frame_control); 635 __le16 fc = hdr->frame_control;
636 u16 ether_type; 636 u16 ether_type;
637 u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb); 637 u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
638 const struct iphdr *ip; 638 const struct iphdr *ip;
@@ -646,7 +646,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
646 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + 646 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
647 SNAP_SIZE + PROTOC_TYPE_SIZE); 647 SNAP_SIZE + PROTOC_TYPE_SIZE);
648 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); 648 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
649 ether_type = ntohs(ether_type);
650 649
651 if (ETH_P_IP == ether_type) { 650 if (ETH_P_IP == ether_type) {
652 if (IPPROTO_UDP == ip->protocol) { 651 if (IPPROTO_UDP == ip->protocol) {
@@ -690,7 +689,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
690 } 689 }
691 690
692 return true; 691 return true;
693 } else if (0x86DD == ether_type) { 692 } else if (ETH_P_IPV6 == ether_type) {
693 /* IPv6 */
694 return true; 694 return true;
695 } 695 }
696 696
@@ -777,10 +777,10 @@ void rtl_watchdog_wq_callback(void *data)
777 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 777 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
778 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 778 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
779 779
780 bool b_busytraffic = false; 780 bool busytraffic = false;
781 bool b_higher_busytraffic = false; 781 bool higher_busytraffic = false;
782 bool b_higher_busyrxtraffic = false; 782 bool higher_busyrxtraffic = false;
783 bool b_higher_busytxtraffic = false; 783 bool higher_busytxtraffic = false;
784 784
785 u8 idx = 0; 785 u8 idx = 0;
786 u32 rx_cnt_inp4eriod = 0; 786 u32 rx_cnt_inp4eriod = 0;
@@ -788,7 +788,7 @@ void rtl_watchdog_wq_callback(void *data)
788 u32 aver_rx_cnt_inperiod = 0; 788 u32 aver_rx_cnt_inperiod = 0;
789 u32 aver_tx_cnt_inperiod = 0; 789 u32 aver_tx_cnt_inperiod = 0;
790 790
791 bool benter_ps = false; 791 bool enter_ps = false;
792 792
793 if (is_hal_stop(rtlhal)) 793 if (is_hal_stop(rtlhal))
794 return; 794 return;
@@ -832,29 +832,29 @@ void rtl_watchdog_wq_callback(void *data)
832 832
833 /* (2) check traffic busy */ 833 /* (2) check traffic busy */
834 if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) 834 if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100)
835 b_busytraffic = true; 835 busytraffic = true;
836 836
837 /* Higher Tx/Rx data. */ 837 /* Higher Tx/Rx data. */
838 if (aver_rx_cnt_inperiod > 4000 || 838 if (aver_rx_cnt_inperiod > 4000 ||
839 aver_tx_cnt_inperiod > 4000) { 839 aver_tx_cnt_inperiod > 4000) {
840 b_higher_busytraffic = true; 840 higher_busytraffic = true;
841 841
842 /* Extremely high Rx data. */ 842 /* Extremely high Rx data. */
843 if (aver_rx_cnt_inperiod > 5000) 843 if (aver_rx_cnt_inperiod > 5000)
844 b_higher_busyrxtraffic = true; 844 higher_busyrxtraffic = true;
845 else 845 else
846 b_higher_busytxtraffic = false; 846 higher_busytxtraffic = false;
847 } 847 }
848 848
849 if (((rtlpriv->link_info.num_rx_inperiod + 849 if (((rtlpriv->link_info.num_rx_inperiod +
850 rtlpriv->link_info.num_tx_inperiod) > 8) || 850 rtlpriv->link_info.num_tx_inperiod) > 8) ||
851 (rtlpriv->link_info.num_rx_inperiod > 2)) 851 (rtlpriv->link_info.num_rx_inperiod > 2))
852 benter_ps = false; 852 enter_ps = false;
853 else 853 else
854 benter_ps = true; 854 enter_ps = true;
855 855
856 /* LeisurePS only work in infra mode. */ 856 /* LeisurePS only work in infra mode. */
857 if (benter_ps) 857 if (enter_ps)
858 rtl_lps_enter(hw); 858 rtl_lps_enter(hw);
859 else 859 else
860 rtl_lps_leave(hw); 860 rtl_lps_leave(hw);
@@ -863,9 +863,9 @@ void rtl_watchdog_wq_callback(void *data)
863 rtlpriv->link_info.num_rx_inperiod = 0; 863 rtlpriv->link_info.num_rx_inperiod = 0;
864 rtlpriv->link_info.num_tx_inperiod = 0; 864 rtlpriv->link_info.num_tx_inperiod = 0;
865 865
866 rtlpriv->link_info.b_busytraffic = b_busytraffic; 866 rtlpriv->link_info.busytraffic = busytraffic;
867 rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic; 867 rtlpriv->link_info.higher_busytraffic = higher_busytraffic;
868 rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic; 868 rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
869 869
870} 870}
871 871
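[editor's note] For reference, the watchdog logic the renaming above runs through (average the per-period counters, derive the busy-traffic flags, and enter leisure power save only when the link is nearly idle) reduces to something like the sketch below. The thresholds are the ones visible in the hunks; the surrounding structure is simplified and the names are not rtlwifi's.

#include <linux/types.h>

struct my_link_info {
	unsigned int num_rx_inperiod;
	unsigned int num_tx_inperiod;
	bool busytraffic;
	bool higher_busytraffic;
};

/* returns true when leisure power save should be entered */
static bool my_watchdog_check(struct my_link_info *li,
			      unsigned int aver_rx, unsigned int aver_tx)
{
	bool enter_ps;

	li->busytraffic = (aver_rx > 100 || aver_tx > 100);
	li->higher_busytraffic = (aver_rx > 4000 || aver_tx > 4000);

	/* only a (nearly) idle link may enter leisure power save */
	enter_ps = !((li->num_rx_inperiod + li->num_tx_inperiod) > 8 ||
		     li->num_rx_inperiod > 2);

	li->num_rx_inperiod = 0;
	li->num_tx_inperiod = 0;

	return enter_ps;
}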
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 3de5a14745f..043045342bc 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -30,6 +30,7 @@
30#define __RTL_BASE_H__ 30#define __RTL_BASE_H__
31 31
32#define RTL_DUMMY_OFFSET 0 32#define RTL_DUMMY_OFFSET 0
33#define RTL_RX_DESC_SIZE 24
33#define RTL_DUMMY_UNIT 8 34#define RTL_DUMMY_UNIT 8
34#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT) 35#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
35#define RTL_TX_DESC_SIZE 32 36#define RTL_TX_DESC_SIZE 32
@@ -52,46 +53,22 @@
52#define FRAME_OFFSET_SEQUENCE 22 53#define FRAME_OFFSET_SEQUENCE 22
53#define FRAME_OFFSET_ADDRESS4 24 54#define FRAME_OFFSET_ADDRESS4 24
54 55
55#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
56 WRITEEF2BYTE(_hdr, _val)
57#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
58 WRITEEF1BYTE(_hdr, _val)
59#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
60 SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
61#define SET_80211_HDR_TO_DS(_hdr, _val) \
62 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
63 56
64#define SET_80211_PS_POLL_AID(_hdr, _val) \ 57#define SET_80211_PS_POLL_AID(_hdr, _val) \
65 WRITEEF2BYTE(((u8 *)(_hdr)) + 2, _val) 58 (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
66#define SET_80211_PS_POLL_BSSID(_hdr, _val) \ 59#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
67 CP_MACADDR(((u8 *)(_hdr)) + 4, (u8 *)(_val)) 60 memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
68#define SET_80211_PS_POLL_TA(_hdr, _val) \ 61#define SET_80211_PS_POLL_TA(_hdr, _val) \
69 CP_MACADDR(((u8 *)(_hdr)) + 10, (u8 *)(_val)) 62 memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN)
70 63
71#define SET_80211_HDR_DURATION(_hdr, _val) \ 64#define SET_80211_HDR_DURATION(_hdr, _val) \
72 WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_DURATION, _val) 65 (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val))
73#define SET_80211_HDR_ADDRESS1(_hdr, _val) \ 66#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
74 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val)) 67 memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN)
75#define SET_80211_HDR_ADDRESS2(_hdr, _val) \ 68#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
76 CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val)) 69 memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN)
77#define SET_80211_HDR_ADDRESS3(_hdr, _val) \ 70#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
78 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val)) 71 memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN)
79#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
80 WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
81
82#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
83 WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
84#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
85 WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
86#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
87 WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
88#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
89 READEF2BYTE(((u8 *)(__phdr)) + 34)
90#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
91 WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
92#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
93 SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
94 (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
95 72
96int rtl_init_core(struct ieee80211_hw *hw); 73int rtl_init_core(struct ieee80211_hw *hw);
97void rtl_deinit_core(struct ieee80211_hw *hw); 74void rtl_deinit_core(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index d6a924a0565..b0996bf8a21 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -434,9 +434,9 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
434 434
435 aci = _rtl_get_hal_qnum(queue); 435 aci = _rtl_get_hal_qnum(queue);
436 mac->ac[aci].aifs = param->aifs; 436 mac->ac[aci].aifs = param->aifs;
437 mac->ac[aci].cw_min = param->cw_min; 437 mac->ac[aci].cw_min = cpu_to_le16(param->cw_min);
438 mac->ac[aci].cw_max = param->cw_max; 438 mac->ac[aci].cw_max = cpu_to_le16(param->cw_max);
439 mac->ac[aci].tx_op = param->txop; 439 mac->ac[aci].tx_op = cpu_to_le16(param->txop);
440 memcpy(&mac->edca_param[aci], param, sizeof(*param)); 440 memcpy(&mac->edca_param[aci], param, sizeof(*param));
441 rtlpriv->cfg->ops->set_qos(hw, aci); 441 rtlpriv->cfg->ops->set_qos(hw, aci);
442 return 0; 442 return 0;
@@ -666,7 +666,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
666 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, 666 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
667 (u8 *) (&basic_rates)); 667 (u8 *) (&basic_rates));
668 668
669 if (rtlpriv->dm.b_useramask) 669 if (rtlpriv->dm.useramask)
670 rtlpriv->cfg->ops->update_rate_mask(hw, 0); 670 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
671 else 671 else
672 rtlpriv->cfg->ops->update_rate_table(hw); 672 rtlpriv->cfg->ops->update_rate_table(hw);
@@ -681,7 +681,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
681 */ 681 */
682 if (changed & BSS_CHANGED_ASSOC) { 682 if (changed & BSS_CHANGED_ASSOC) {
683 if (bss_conf->assoc) { 683 if (bss_conf->assoc) {
684 if (ppsc->b_fwctrl_lps) { 684 if (ppsc->fwctrl_lps) {
685 u8 mstatus = RT_MEDIA_CONNECT; 685 u8 mstatus = RT_MEDIA_CONNECT;
686 rtlpriv->cfg->ops->set_hw_reg(hw, 686 rtlpriv->cfg->ops->set_hw_reg(hw,
687 HW_VAR_H2C_FW_JOINBSSRPT, 687 HW_VAR_H2C_FW_JOINBSSRPT,
@@ -689,7 +689,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
689 ppsc->report_linked = true; 689 ppsc->report_linked = true;
690 } 690 }
691 } else { 691 } else {
692 if (ppsc->b_fwctrl_lps) { 692 if (ppsc->fwctrl_lps) {
693 u8 mstatus = RT_MEDIA_DISCONNECT; 693 u8 mstatus = RT_MEDIA_DISCONNECT;
694 rtlpriv->cfg->ops->set_hw_reg(hw, 694 rtlpriv->cfg->ops->set_hw_reg(hw,
695 HW_VAR_H2C_FW_JOINBSSRPT, 695 HW_VAR_H2C_FW_JOINBSSRPT,
@@ -748,7 +748,8 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
748static int rtl_op_ampdu_action(struct ieee80211_hw *hw, 748static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
749 struct ieee80211_vif *vif, 749 struct ieee80211_vif *vif,
750 enum ieee80211_ampdu_mlme_action action, 750 enum ieee80211_ampdu_mlme_action action,
751 struct ieee80211_sta *sta, u16 tid, u16 * ssn) 751 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
752 u8 buf_size)
752{ 753{
753 struct rtl_priv *rtlpriv = rtl_priv(hw); 754 struct rtl_priv *rtlpriv = rtl_priv(hw);
754 755
@@ -817,7 +818,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
817 /* fix fwlps issue */ 818 /* fix fwlps issue */
818 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); 819 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
819 820
820 if (rtlpriv->dm.b_useramask) 821 if (rtlpriv->dm.useramask)
821 rtlpriv->cfg->ops->update_rate_mask(hw, 0); 822 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
822 else 823 else
823 rtlpriv->cfg->ops->update_rate_table(hw); 824 rtlpriv->cfg->ops->update_rate_table(hw);
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index 08bdec2ceda..e4aa8687408 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -105,6 +105,7 @@
105#define COMP_MAC80211 BIT(26) 105#define COMP_MAC80211 BIT(26)
106#define COMP_REGD BIT(27) 106#define COMP_REGD BIT(27)
107#define COMP_CHAN BIT(28) 107#define COMP_CHAN BIT(28)
108#define COMP_USB BIT(29)
108 109
109/*-------------------------------------------------------------- 110/*--------------------------------------------------------------
110 Define the rt_print components 111 Define the rt_print components
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 1758d446324..9cd7703c2a3 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -50,7 +50,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
50 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; 50 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
51 51
52 ppsc->reg_rfps_level = 0; 52 ppsc->reg_rfps_level = 0;
53 ppsc->b_support_aspm = 0; 53 ppsc->support_aspm = 0;
54 54
55 /*Update PCI ASPM setting */ 55 /*Update PCI ASPM setting */
56 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm; 56 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -115,29 +115,29 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
115 switch (rtlpci->const_support_pciaspm) { 115 switch (rtlpci->const_support_pciaspm) {
116 case 0:{ 116 case 0:{
117 /*Not support ASPM. */ 117 /*Not support ASPM. */
118 bool b_support_aspm = false; 118 bool support_aspm = false;
119 ppsc->b_support_aspm = b_support_aspm; 119 ppsc->support_aspm = support_aspm;
120 break; 120 break;
121 } 121 }
122 case 1:{ 122 case 1:{
123 /*Support ASPM. */ 123 /*Support ASPM. */
124 bool b_support_aspm = true; 124 bool support_aspm = true;
125 bool b_support_backdoor = true; 125 bool support_backdoor = true;
126 ppsc->b_support_aspm = b_support_aspm; 126 ppsc->support_aspm = support_aspm;
127 127
128 /*if(priv->oem_id == RT_CID_TOSHIBA && 128 /*if(priv->oem_id == RT_CID_TOSHIBA &&
129 !priv->ndis_adapter.amd_l1_patch) 129 !priv->ndis_adapter.amd_l1_patch)
130 b_support_backdoor = false; */ 130 support_backdoor = false; */
131 131
132 ppsc->b_support_backdoor = b_support_backdoor; 132 ppsc->support_backdoor = support_backdoor;
133 133
134 break; 134 break;
135 } 135 }
136 case 2: 136 case 2:
137 /*ASPM value set by chipset. */ 137 /*ASPM value set by chipset. */
138 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) { 138 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
139 bool b_support_aspm = true; 139 bool support_aspm = true;
140 ppsc->b_support_aspm = b_support_aspm; 140 ppsc->support_aspm = support_aspm;
141 } 141 }
142 break; 142 break;
143 default: 143 default:
@@ -476,9 +476,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
476 476
477 skb = __skb_dequeue(&ring->queue); 477 skb = __skb_dequeue(&ring->queue);
478 pci_unmap_single(rtlpci->pdev, 478 pci_unmap_single(rtlpci->pdev,
479 le32_to_cpu(rtlpriv->cfg->ops-> 479 rtlpriv->cfg->ops->
480 get_desc((u8 *) entry, true, 480 get_desc((u8 *) entry, true,
481 HW_DESC_TXBUFF_ADDR)), 481 HW_DESC_TXBUFF_ADDR),
482 skb->len, PCI_DMA_TODEVICE); 482 skb->len, PCI_DMA_TODEVICE);
483 483
484 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE, 484 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
@@ -557,7 +557,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
557 return; 557 return;
558 } else { 558 } else {
559 struct ieee80211_hdr *hdr; 559 struct ieee80211_hdr *hdr;
560 u16 fc; 560 __le16 fc;
561 struct sk_buff *new_skb = NULL; 561 struct sk_buff *new_skb = NULL;
562 562
563 rtlpriv->cfg->ops->query_rx_desc(hw, &stats, 563 rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
@@ -583,9 +583,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
583 */ 583 */
584 584
585 hdr = (struct ieee80211_hdr *)(skb->data); 585 hdr = (struct ieee80211_hdr *)(skb->data);
586 fc = le16_to_cpu(hdr->frame_control); 586 fc = hdr->frame_control;
587 587
588 if (!stats.b_crc) { 588 if (!stats.crc) {
589 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, 589 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
590 sizeof(rx_status)); 590 sizeof(rx_status));
591 591
@@ -666,7 +666,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
666 666
667 } 667 }
668done: 668done:
669 bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb)); 669 bufferaddress = (u32)(*((dma_addr_t *) skb->cb));
670 tmp_one = 1; 670 tmp_one = 1;
671 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false, 671 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
672 HW_DESC_RXBUFF_ADDR, 672 HW_DESC_RXBUFF_ADDR,
@@ -690,75 +690,6 @@ done:
690 690
691} 691}
692 692
693void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw)
694{
695 struct rtl_priv *rtlpriv = rtl_priv(hw);
696 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
697 int prio;
698
699 for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) {
700 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
701
702 while (skb_queue_len(&ring->queue)) {
703 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
704 struct sk_buff *skb;
705 struct ieee80211_tx_info *info;
706 u8 own;
707
708 /*
709 *beacon packet will only use the first
710 *descriptor defautly, and the own may not
711 *be cleared by the hardware, and
712 *beacon will free in prepare beacon
713 */
714 if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE ||
715 prio == HCCA_QUEUE)
716 break;
717
718 own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry,
719 true,
720 HW_DESC_OWN);
721
722 if (own)
723 break;
724
725 skb = __skb_dequeue(&ring->queue);
726 pci_unmap_single(rtlpci->pdev,
727 le32_to_cpu(rtlpriv->cfg->ops->
728 get_desc((u8 *) entry,
729 true,
730 HW_DESC_TXBUFF_ADDR)),
731 skb->len, PCI_DMA_TODEVICE);
732
733 ring->idx = (ring->idx + 1) % ring->entries;
734
735 info = IEEE80211_SKB_CB(skb);
736 ieee80211_tx_info_clear_status(info);
737
738 info->flags |= IEEE80211_TX_STAT_ACK;
739 /*info->status.rates[0].count = 1; */
740
741 ieee80211_tx_status_irqsafe(hw, skb);
742
743 if ((ring->entries - skb_queue_len(&ring->queue))
744 == 2 && prio != BEACON_QUEUE) {
745 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
746 ("more desc left, wake "
747 "skb_queue@%d,ring->idx = %d,"
748 "skb_queue_len = 0x%d\n",
749 prio, ring->idx,
750 skb_queue_len(&ring->queue)));
751
752 ieee80211_wake_queue(hw,
753 skb_get_queue_mapping
754 (skb));
755 }
756
757 skb = NULL;
758 }
759 }
760}
761
762static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) 693static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
763{ 694{
764 struct ieee80211_hw *hw = dev_id; 695 struct ieee80211_hw *hw = dev_id;
@@ -959,17 +890,17 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
959 rtlhal->hw = hw; 890 rtlhal->hw = hw;
960 rtlpci->pdev = pdev; 891 rtlpci->pdev = pdev;
961 892
962 ppsc->b_inactiveps = false; 893 ppsc->inactiveps = false;
963 ppsc->b_leisure_ps = true; 894 ppsc->leisure_ps = true;
964 ppsc->b_fwctrl_lps = true; 895 ppsc->fwctrl_lps = true;
965 ppsc->b_reg_fwctrl_lps = 3; 896 ppsc->reg_fwctrl_lps = 3;
966 ppsc->reg_max_lps_awakeintvl = 5; 897 ppsc->reg_max_lps_awakeintvl = 5;
967 898
968 if (ppsc->b_reg_fwctrl_lps == 1) 899 if (ppsc->reg_fwctrl_lps == 1)
969 ppsc->fwctrl_psmode = FW_PS_MIN_MODE; 900 ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
970 else if (ppsc->b_reg_fwctrl_lps == 2) 901 else if (ppsc->reg_fwctrl_lps == 2)
971 ppsc->fwctrl_psmode = FW_PS_MAX_MODE; 902 ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
972 else if (ppsc->b_reg_fwctrl_lps == 3) 903 else if (ppsc->reg_fwctrl_lps == 3)
973 ppsc->fwctrl_psmode = FW_PS_DTIM_MODE; 904 ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
974 905
975 /*Tx/Rx related var */ 906 /*Tx/Rx related var */
@@ -1024,9 +955,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1024 ("queue:%d, ring_addr:%p\n", prio, ring)); 955 ("queue:%d, ring_addr:%p\n", prio, ring));
1025 956
1026 for (i = 0; i < entries; i++) { 957 for (i = 0; i < entries; i++) {
1027 nextdescaddress = cpu_to_le32((u32) dma + 958 nextdescaddress = (u32) dma + ((i + 1) % entries) *
1028 ((i + 1) % entries) * 959 sizeof(*ring);
1029 sizeof(*ring));
1030 960
1031 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]), 961 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
1032 true, HW_DESC_TX_NEXTDESC_ADDR, 962 true, HW_DESC_TX_NEXTDESC_ADDR,
@@ -1090,7 +1020,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1090 rtlpci->rxbuffersize, 1020 rtlpci->rxbuffersize,
1091 PCI_DMA_FROMDEVICE); 1021 PCI_DMA_FROMDEVICE);
1092 1022
1093 bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb)); 1023 bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
1094 rtlpriv->cfg->ops->set_desc((u8 *)entry, false, 1024 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1095 HW_DESC_RXBUFF_ADDR, 1025 HW_DESC_RXBUFF_ADDR,
1096 (u8 *)&bufferaddress); 1026 (u8 *)&bufferaddress);
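[editor's note] The RX ring hunks above stop wrapping the buffer address in cpu_to_le32()/le32_to_cpu(): the DMA address is kept in skb->cb and handed to the descriptor accessors as a plain CPU value, leaving any byte swapping to set_desc()/get_desc() themselves. A sketch of that mapping pattern using the legacy PCI DMA API this driver uses; the helper names are illustrative.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static int my_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			 unsigned int bufsize, u32 *bufferaddress)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, skb_tail_pointer(skb), bufsize,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, mapping))
		return -ENOMEM;

	*((dma_addr_t *)skb->cb) = mapping;	/* remembered for unmap */
	*bufferaddress = (u32)mapping;		/* handed to the descriptor */
	return 0;
}

static void my_unmap_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			    unsigned int bufsize)
{
	pci_unmap_single(pdev, *((dma_addr_t *)skb->cb), bufsize,
			 PCI_DMA_FROMDEVICE);
}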
@@ -1121,9 +1051,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1121 struct sk_buff *skb = __skb_dequeue(&ring->queue); 1051 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1122 1052
1123 pci_unmap_single(rtlpci->pdev, 1053 pci_unmap_single(rtlpci->pdev,
1124 le32_to_cpu(rtlpriv->cfg-> 1054 rtlpriv->cfg->
1125 ops->get_desc((u8 *) entry, true, 1055 ops->get_desc((u8 *) entry, true,
1126 HW_DESC_TXBUFF_ADDR)), 1056 HW_DESC_TXBUFF_ADDR),
1127 skb->len, PCI_DMA_TODEVICE); 1057 skb->len, PCI_DMA_TODEVICE);
1128 kfree_skb(skb); 1058 kfree_skb(skb);
1129 ring->idx = (ring->idx + 1) % ring->entries; 1059 ring->idx = (ring->idx + 1) % ring->entries;
@@ -1255,11 +1185,11 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1255 __skb_dequeue(&ring->queue); 1185 __skb_dequeue(&ring->queue);
1256 1186
1257 pci_unmap_single(rtlpci->pdev, 1187 pci_unmap_single(rtlpci->pdev,
1258 le32_to_cpu(rtlpriv->cfg->ops-> 1188 rtlpriv->cfg->ops->
1259 get_desc((u8 *) 1189 get_desc((u8 *)
1260 entry, 1190 entry,
1261 true, 1191 true,
1262 HW_DESC_TXBUFF_ADDR)), 1192 HW_DESC_TXBUFF_ADDR),
1263 skb->len, PCI_DMA_TODEVICE); 1193 skb->len, PCI_DMA_TODEVICE);
1264 kfree_skb(skb); 1194 kfree_skb(skb);
1265 ring->idx = (ring->idx + 1) % ring->entries; 1195 ring->idx = (ring->idx + 1) % ring->entries;
@@ -1273,7 +1203,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1273 return 0; 1203 return 0;
1274} 1204}
1275 1205
1276unsigned int _rtl_mac_to_hwqueue(u16 fc, 1206static unsigned int _rtl_mac_to_hwqueue(__le16 fc,
1277 unsigned int mac80211_queue_index) 1207 unsigned int mac80211_queue_index)
1278{ 1208{
1279 unsigned int hw_queue_index; 1209 unsigned int hw_queue_index;
@@ -1312,7 +1242,7 @@ out:
1312 return hw_queue_index; 1242 return hw_queue_index;
1313} 1243}
1314 1244
1315int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1245static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1316{ 1246{
1317 struct rtl_priv *rtlpriv = rtl_priv(hw); 1247 struct rtl_priv *rtlpriv = rtl_priv(hw);
1318 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1248 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1323,7 +1253,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1323 unsigned int queue_index, hw_queue; 1253 unsigned int queue_index, hw_queue;
1324 unsigned long flags; 1254 unsigned long flags;
1325 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 1255 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
1326 u16 fc = le16_to_cpu(hdr->frame_control); 1256 __le16 fc = hdr->frame_control;
1327 u8 *pda_addr = hdr->addr1; 1257 u8 *pda_addr = hdr->addr1;
1328 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1258 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1329 /*ssn */ 1259 /*ssn */
@@ -1429,7 +1359,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1429 return 0; 1359 return 0;
1430} 1360}
1431 1361
1432void rtl_pci_deinit(struct ieee80211_hw *hw) 1362static void rtl_pci_deinit(struct ieee80211_hw *hw)
1433{ 1363{
1434 struct rtl_priv *rtlpriv = rtl_priv(hw); 1364 struct rtl_priv *rtlpriv = rtl_priv(hw);
1435 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1365 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1444,7 +1374,7 @@ void rtl_pci_deinit(struct ieee80211_hw *hw)
1444 1374
1445} 1375}
1446 1376
1447int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) 1377static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1448{ 1378{
1449 struct rtl_priv *rtlpriv = rtl_priv(hw); 1379 struct rtl_priv *rtlpriv = rtl_priv(hw);
1450 int err; 1380 int err;
@@ -1461,7 +1391,7 @@ int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1461 return 1; 1391 return 1;
1462} 1392}
1463 1393
1464int rtl_pci_start(struct ieee80211_hw *hw) 1394static int rtl_pci_start(struct ieee80211_hw *hw)
1465{ 1395{
1466 struct rtl_priv *rtlpriv = rtl_priv(hw); 1396 struct rtl_priv *rtlpriv = rtl_priv(hw);
1467 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1397 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1496,7 +1426,7 @@ int rtl_pci_start(struct ieee80211_hw *hw)
1496 return 0; 1426 return 0;
1497} 1427}
1498 1428
1499void rtl_pci_stop(struct ieee80211_hw *hw) 1429static void rtl_pci_stop(struct ieee80211_hw *hw)
1500{ 1430{
1501 struct rtl_priv *rtlpriv = rtl_priv(hw); 1431 struct rtl_priv *rtlpriv = rtl_priv(hw);
1502 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1432 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1547,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1547 struct pci_dev *bridge_pdev = pdev->bus->self; 1477 struct pci_dev *bridge_pdev = pdev->bus->self;
1548 u16 venderid; 1478 u16 venderid;
1549 u16 deviceid; 1479 u16 deviceid;
1550 u8 revisionid;
1551 u16 irqline; 1480 u16 irqline;
1552 u8 tmp; 1481 u8 tmp;
1553 1482
1554 venderid = pdev->vendor; 1483 venderid = pdev->vendor;
1555 deviceid = pdev->device; 1484 deviceid = pdev->device;
1556 pci_read_config_byte(pdev, 0x8, &revisionid);
1557 pci_read_config_word(pdev, 0x3C, &irqline); 1485 pci_read_config_word(pdev, 0x3C, &irqline);
1558 1486
1559 if (deviceid == RTL_PCI_8192_DID || 1487 if (deviceid == RTL_PCI_8192_DID ||
@@ -1564,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1564 deviceid == RTL_PCI_8173_DID || 1492 deviceid == RTL_PCI_8173_DID ||
1565 deviceid == RTL_PCI_8172_DID || 1493 deviceid == RTL_PCI_8172_DID ||
1566 deviceid == RTL_PCI_8171_DID) { 1494 deviceid == RTL_PCI_8171_DID) {
1567 switch (revisionid) { 1495 switch (pdev->revision) {
1568 case RTL_PCI_REVISION_ID_8192PCIE: 1496 case RTL_PCI_REVISION_ID_8192PCIE:
1569 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1497 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1570 ("8192 PCI-E is found - " 1498 ("8192 PCI-E is found - "
@@ -1838,7 +1766,7 @@ fail3:
1838 ieee80211_free_hw(hw); 1766 ieee80211_free_hw(hw);
1839 1767
1840 if (rtlpriv->io.pci_mem_start != 0) 1768 if (rtlpriv->io.pci_mem_start != 0)
1841 pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); 1769 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1842 1770
1843fail2: 1771fail2:
1844 pci_release_regions(pdev); 1772 pci_release_regions(pdev);
@@ -1888,7 +1816,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
1888 } 1816 }
1889 1817
1890 if (rtlpriv->io.pci_mem_start != 0) { 1818 if (rtlpriv->io.pci_mem_start != 0) {
1891 pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); 1819 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1892 pci_release_regions(pdev); 1820 pci_release_regions(pdev);
1893 } 1821 }
1894 1822
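
The pci.c hunks above fold several cleanups into one patch: file-local entry points (rtl_pci_tx, rtl_pci_start, and friends) gain static linkage, the spurious cpu_to_le32()/le32_to_cpu() wrappers around DMA descriptor addresses are dropped, and the chip revision comes from the cached pdev->revision field instead of a manual config-space read. A minimal sketch of that last pattern follows; the helper name is illustrative and not part of the patch.

#include <linux/pci.h>

/* Hypothetical helper: both paths yield the same revision byte. */
static bool example_revision_matches(struct pci_dev *pdev)
{
	u8 rev;

	/* Old style: re-read config offset 0x08 (PCI_REVISION_ID) by hand. */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);

	/* New style: the PCI core cached this byte at enumeration time. */
	return rev == pdev->revision;	/* always true */
}
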
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index d36a6693995..0caa8142972 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -244,34 +244,34 @@ int rtl_pci_resume(struct pci_dev *pdev);
244 244
245static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr) 245static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
246{ 246{
247 return 0xff & readb((u8 *) rtlpriv->io.pci_mem_start + addr); 247 return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
248} 248}
249 249
250static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr) 250static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
251{ 251{
252 return readw((u8 *) rtlpriv->io.pci_mem_start + addr); 252 return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
253} 253}
254 254
255static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr) 255static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
256{ 256{
257 return readl((u8 *) rtlpriv->io.pci_mem_start + addr); 257 return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
258} 258}
259 259
260static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val) 260static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
261{ 261{
262 writeb(val, (u8 *) rtlpriv->io.pci_mem_start + addr); 262 writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
263} 263}
264 264
265static inline void pci_write16_async(struct rtl_priv *rtlpriv, 265static inline void pci_write16_async(struct rtl_priv *rtlpriv,
266 u32 addr, u16 val) 266 u32 addr, u16 val)
267{ 267{
268 writew(val, (u8 *) rtlpriv->io.pci_mem_start + addr); 268 writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
269} 269}
270 270
271static inline void pci_write32_async(struct rtl_priv *rtlpriv, 271static inline void pci_write32_async(struct rtl_priv *rtlpriv,
272 u32 addr, u32 val) 272 u32 addr, u32 val)
273{ 273{
274 writel(val, (u8 *) rtlpriv->io.pci_mem_start + addr); 274 writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
275} 275}
276 276
277static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val) 277static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
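
The pci.h hunk is a sparse cleanup: the MMIO cookie is cast to u8 __iomem * so the accessors stay in the __iomem address space, and the redundant 0xff mask on readb() goes away because readb() already returns a byte. A minimal sketch of __iomem-clean accessors, with illustrative names not taken from rtlwifi:

#include <linux/io.h>
#include <linux/types.h>

struct example_io {
	void __iomem *regs;	/* e.g. from pci_iomap() */
};

static inline u8 example_read8(const struct example_io *io, u32 addr)
{
	/* readb() returns a u8, so no extra masking is needed. */
	return readb(io->regs + addr);
}

static inline void example_write8(const struct example_io *io,
				  u32 addr, u8 val)
{
	writeb(val, io->regs + addr);
}

Keeping the __iomem annotation on the pointer, rather than casting it away and back, is what lets sparse flag accidental plain dereferences of device memory.
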
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index d2326c13449..6b7e217b6b8 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -86,7 +86,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
86 struct rtl_priv *rtlpriv = rtl_priv(hw); 86 struct rtl_priv *rtlpriv = rtl_priv(hw);
87 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 87 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
88 enum rf_pwrstate rtstate; 88 enum rf_pwrstate rtstate;
89 bool b_actionallowed = false; 89 bool actionallowed = false;
90 u16 rfwait_cnt = 0; 90 u16 rfwait_cnt = 0;
91 unsigned long flag; 91 unsigned long flag;
92 92
@@ -139,13 +139,13 @@ no_protect:
139 ppsc->rfoff_reason &= (~changesource); 139 ppsc->rfoff_reason &= (~changesource);
140 140
141 if ((changesource == RF_CHANGE_BY_HW) && 141 if ((changesource == RF_CHANGE_BY_HW) &&
142 (ppsc->b_hwradiooff == true)) { 142 (ppsc->hwradiooff == true)) {
143 ppsc->b_hwradiooff = false; 143 ppsc->hwradiooff = false;
144 } 144 }
145 145
146 if (!ppsc->rfoff_reason) { 146 if (!ppsc->rfoff_reason) {
147 ppsc->rfoff_reason = 0; 147 ppsc->rfoff_reason = 0;
148 b_actionallowed = true; 148 actionallowed = true;
149 } 149 }
150 150
151 break; 151 break;
@@ -153,17 +153,17 @@ no_protect:
153 case ERFOFF: 153 case ERFOFF:
154 154
155 if ((changesource == RF_CHANGE_BY_HW) 155 if ((changesource == RF_CHANGE_BY_HW)
156 && (ppsc->b_hwradiooff == false)) { 156 && (ppsc->hwradiooff == false)) {
157 ppsc->b_hwradiooff = true; 157 ppsc->hwradiooff = true;
158 } 158 }
159 159
160 ppsc->rfoff_reason |= changesource; 160 ppsc->rfoff_reason |= changesource;
161 b_actionallowed = true; 161 actionallowed = true;
162 break; 162 break;
163 163
164 case ERFSLEEP: 164 case ERFSLEEP:
165 ppsc->rfoff_reason |= changesource; 165 ppsc->rfoff_reason |= changesource;
166 b_actionallowed = true; 166 actionallowed = true;
167 break; 167 break;
168 168
169 default: 169 default:
@@ -172,7 +172,7 @@ no_protect:
172 break; 172 break;
173 } 173 }
174 174
175 if (b_actionallowed) 175 if (actionallowed)
176 rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset); 176 rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
177 177
178 if (!protect_or_not) { 178 if (!protect_or_not) {
@@ -181,7 +181,7 @@ no_protect:
181 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); 181 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
182 } 182 }
183 183
184 return b_actionallowed; 184 return actionallowed;
185} 185}
186EXPORT_SYMBOL(rtl_ps_set_rf_state); 186EXPORT_SYMBOL(rtl_ps_set_rf_state);
187 187
@@ -191,7 +191,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
192 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 192 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
193 193
194 ppsc->b_swrf_processing = true; 194 ppsc->swrf_processing = true;
195 195
196 if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) { 196 if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
197 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && 197 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
@@ -213,7 +213,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
213 } 213 }
214 } 214 }
215 215
216 ppsc->b_swrf_processing = false; 216 ppsc->swrf_processing = false;
217} 217}
218 218
219void rtl_ips_nic_off_wq_callback(void *data) 219void rtl_ips_nic_off_wq_callback(void *data)
@@ -239,13 +239,13 @@ void rtl_ips_nic_off_wq_callback(void *data)
239 if (rtlpriv->sec.being_setkey) 239 if (rtlpriv->sec.being_setkey)
240 return; 240 return;
241 241
242 if (ppsc->b_inactiveps) { 242 if (ppsc->inactiveps) {
243 rtstate = ppsc->rfpwr_state; 243 rtstate = ppsc->rfpwr_state;
244 244
245 /* 245 /*
246 *Do not enter IPS in the following conditions: 246 *Do not enter IPS in the following conditions:
247 *(1) RF is already OFF or Sleep 247 *(1) RF is already OFF or Sleep
248 *(2) b_swrf_processing (indicates the IPS is still under going) 248 *(2) swrf_processing (indicates the IPS is still under going)
249 *(3) Connectted (only disconnected can trigger IPS) 249 *(3) Connectted (only disconnected can trigger IPS)
250 *(4) IBSS (send Beacon) 250 *(4) IBSS (send Beacon)
251 *(5) AP mode (send Beacon) 251 *(5) AP mode (send Beacon)
@@ -253,14 +253,14 @@ void rtl_ips_nic_off_wq_callback(void *data)
253 */ 253 */
254 254
255 if (rtstate == ERFON && 255 if (rtstate == ERFON &&
256 !ppsc->b_swrf_processing && 256 !ppsc->swrf_processing &&
257 (mac->link_state == MAC80211_NOLINK) && 257 (mac->link_state == MAC80211_NOLINK) &&
258 !mac->act_scanning) { 258 !mac->act_scanning) {
259 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, 259 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
260 ("IPSEnter(): Turn off RF.\n")); 260 ("IPSEnter(): Turn off RF.\n"));
261 261
262 ppsc->inactive_pwrstate = ERFOFF; 262 ppsc->inactive_pwrstate = ERFOFF;
263 ppsc->b_in_powersavemode = true; 263 ppsc->in_powersavemode = true;
264 264
265 /*rtl_pci_reset_trx_ring(hw); */ 265 /*rtl_pci_reset_trx_ring(hw); */
266 _rtl_ps_inactive_ps(hw); 266 _rtl_ps_inactive_ps(hw);
@@ -290,15 +290,15 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
290 290
291 spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); 291 spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
292 292
293 if (ppsc->b_inactiveps) { 293 if (ppsc->inactiveps) {
294 rtstate = ppsc->rfpwr_state; 294 rtstate = ppsc->rfpwr_state;
295 295
296 if (rtstate != ERFON && 296 if (rtstate != ERFON &&
297 !ppsc->b_swrf_processing && 297 !ppsc->swrf_processing &&
298 ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) { 298 ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
299 299
300 ppsc->inactive_pwrstate = ERFON; 300 ppsc->inactive_pwrstate = ERFON;
301 ppsc->b_in_powersavemode = false; 301 ppsc->in_powersavemode = false;
302 302
303 _rtl_ps_inactive_ps(hw); 303 _rtl_ps_inactive_ps(hw);
304 } 304 }
@@ -370,9 +370,9 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
370 * mode and set RPWM to turn RF on. 370 * mode and set RPWM to turn RF on.
371 */ 371 */
372 372
373 if ((ppsc->b_fwctrl_lps) && (ppsc->b_leisure_ps) && 373 if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) &&
374 ppsc->report_linked) { 374 ppsc->report_linked) {
375 bool b_fw_current_inps; 375 bool fw_current_inps;
376 if (ppsc->dot11_psmode == EACTIVE) { 376 if (ppsc->dot11_psmode == EACTIVE) {
377 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 377 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
378 ("FW LPS leave ps_mode:%x\n", 378 ("FW LPS leave ps_mode:%x\n",
@@ -385,11 +385,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
385 rtlpriv->cfg->ops->set_hw_reg(hw, 385 rtlpriv->cfg->ops->set_hw_reg(hw,
386 HW_VAR_H2C_FW_PWRMODE, 386 HW_VAR_H2C_FW_PWRMODE,
387 (u8 *) (&fw_pwrmode)); 387 (u8 *) (&fw_pwrmode));
388 b_fw_current_inps = false; 388 fw_current_inps = false;
389 389
390 rtlpriv->cfg->ops->set_hw_reg(hw, 390 rtlpriv->cfg->ops->set_hw_reg(hw,
391 HW_VAR_FW_PSMODE_STATUS, 391 HW_VAR_FW_PSMODE_STATUS,
392 (u8 *) (&b_fw_current_inps)); 392 (u8 *) (&fw_current_inps));
393 393
394 } else { 394 } else {
395 if (rtl_get_fwlps_doze(hw)) { 395 if (rtl_get_fwlps_doze(hw)) {
@@ -398,10 +398,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
398 ppsc->fwctrl_psmode)); 398 ppsc->fwctrl_psmode));
399 399
400 rpwm_val = 0x02; /* RF off */ 400 rpwm_val = 0x02; /* RF off */
401 b_fw_current_inps = true; 401 fw_current_inps = true;
402 rtlpriv->cfg->ops->set_hw_reg(hw, 402 rtlpriv->cfg->ops->set_hw_reg(hw,
403 HW_VAR_FW_PSMODE_STATUS, 403 HW_VAR_FW_PSMODE_STATUS,
404 (u8 *) (&b_fw_current_inps)); 404 (u8 *) (&fw_current_inps));
405 rtlpriv->cfg->ops->set_hw_reg(hw, 405 rtlpriv->cfg->ops->set_hw_reg(hw,
406 HW_VAR_H2C_FW_PWRMODE, 406 HW_VAR_H2C_FW_PWRMODE,
407 (u8 *) (&ppsc->fwctrl_psmode)); 407 (u8 *) (&ppsc->fwctrl_psmode));
@@ -425,13 +425,13 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
425 struct rtl_priv *rtlpriv = rtl_priv(hw); 425 struct rtl_priv *rtlpriv = rtl_priv(hw);
426 unsigned long flag; 426 unsigned long flag;
427 427
428 if (!(ppsc->b_fwctrl_lps && ppsc->b_leisure_ps)) 428 if (!(ppsc->fwctrl_lps && ppsc->leisure_ps))
429 return; 429 return;
430 430
431 if (rtlpriv->sec.being_setkey) 431 if (rtlpriv->sec.being_setkey)
432 return; 432 return;
433 433
434 if (rtlpriv->link_info.b_busytraffic) 434 if (rtlpriv->link_info.busytraffic)
435 return; 435 return;
436 436
437 /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */ 437 /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */
@@ -446,7 +446,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
446 446
447 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); 447 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
448 448
449 if (ppsc->b_leisure_ps) { 449 if (ppsc->leisure_ps) {
450 /* Idle for a while if we connect to AP a while ago. */ 450 /* Idle for a while if we connect to AP a while ago. */
451 if (mac->cnt_after_linked >= 2) { 451 if (mac->cnt_after_linked >= 2) {
452 if (ppsc->dot11_psmode == EACTIVE) { 452 if (ppsc->dot11_psmode == EACTIVE) {
@@ -470,7 +470,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
470 470
471 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); 471 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
472 472
473 if (ppsc->b_fwctrl_lps && ppsc->b_leisure_ps) { 473 if (ppsc->fwctrl_lps && ppsc->leisure_ps) {
474 if (ppsc->dot11_psmode != EACTIVE) { 474 if (ppsc->dot11_psmode != EACTIVE) {
475 475
476 /*FIX ME */ 476 /*FIX ME */
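
The ps.c changes are a mechanical rename that drops the Hungarian "b_" prefix from boolean fields (b_hwradiooff, b_fwctrl_lps, and so on), which also makes the rfoff_reason bookkeeping in rtl_ps_set_rf_state() easier to read: each requester sets its own bit when it wants RF off, and RF may only be switched back on once every bit is clear. A standalone sketch of that pattern, with made-up bit names:

#include <linux/types.h>

#define RF_OFF_BY_SW	(1U << 0)
#define RF_OFF_BY_HW	(1U << 1)
#define RF_OFF_BY_PS	(1U << 2)

/* Record one source's request for RF off; always granted. */
static void rf_request_off(u32 *rfoff_reason, u32 who)
{
	*rfoff_reason |= who;
}

/* Withdraw one source's objection; RF-on is allowed only when none remain. */
static bool rf_request_on(u32 *rfoff_reason, u32 who)
{
	*rfoff_reason &= ~who;
	return *rfoff_reason == 0;
}
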
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
new file mode 100644
index 00000000000..b4f1e4e6b73
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -0,0 +1,1388 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30struct dig_t dm_digtable;
31static struct ps_t dm_pstable;
32
33static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
34 0x7f8001fe,
35 0x788001e2,
36 0x71c001c7,
37 0x6b8001ae,
38 0x65400195,
39 0x5fc0017f,
40 0x5a400169,
41 0x55400155,
42 0x50800142,
43 0x4c000130,
44 0x47c0011f,
45 0x43c0010f,
46 0x40000100,
47 0x3c8000f2,
48 0x390000e4,
49 0x35c000d7,
50 0x32c000cb,
51 0x300000c0,
52 0x2d4000b5,
53 0x2ac000ab,
54 0x288000a2,
55 0x26000098,
56 0x24000090,
57 0x22000088,
58 0x20000080,
59 0x1e400079,
60 0x1c800072,
61 0x1b00006c,
62 0x19800066,
63 0x18000060,
64 0x16c0005b,
65 0x15800056,
66 0x14400051,
67 0x1300004c,
68 0x12000048,
69 0x11000044,
70 0x10000040,
71};
72
73static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
74 {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
75 {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
76 {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
77 {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
78 {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
79 {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
80 {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
81 {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
82 {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
83 {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
84 {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
85 {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
86 {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
87 {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
88 {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
89 {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
90 {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
91 {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
92 {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
93 {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
94 {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
95 {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
96 {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
97 {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
98 {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
99 {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
100 {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
101 {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
102 {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
103 {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
104 {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
105 {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
106 {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
107};
108
109static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
110 {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
111 {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
112 {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
113 {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
114 {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
115 {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
116 {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
117 {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
118 {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
119 {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
120 {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
121 {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
122 {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
123 {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
124 {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
125 {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
126 {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
127 {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
128 {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
129 {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
130 {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
131 {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
132 {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
133 {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
134 {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
135 {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
136 {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
137 {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
138 {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
139 {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
140 {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
141 {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
142 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
143};
144
145static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
146{
147 dm_digtable.dig_enable_flag = true;
148 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
149 dm_digtable.cur_igvalue = 0x20;
150 dm_digtable.pre_igvalue = 0x0;
151 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
152 dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
153 dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
154 dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
155 dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
156 dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
157 dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
158 dm_digtable.rx_gain_range_max = DM_DIG_MAX;
159 dm_digtable.rx_gain_range_min = DM_DIG_MIN;
160 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
161 dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
162 dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
163 dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
164 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
165}
166
167static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
168{
169 struct rtl_priv *rtlpriv = rtl_priv(hw);
170 long rssi_val_min = 0;
171
172 if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
173 (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
174 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
175 rssi_val_min =
176 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
177 rtlpriv->dm.undecorated_smoothed_pwdb) ?
178 rtlpriv->dm.undecorated_smoothed_pwdb :
179 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
180 else
181 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
182 } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
183 dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
184 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
185 } else if (dm_digtable.curmultista_connectstate ==
186 DIG_MULTISTA_CONNECT) {
187 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
188 }
189
190 return (u8) rssi_val_min;
191}
192
193static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
194{
195 u32 ret_value;
196 struct rtl_priv *rtlpriv = rtl_priv(hw);
197 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
198
199 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
200 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
201
202 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
203 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
204 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
205
206 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
207 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
208 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
209 falsealm_cnt->cnt_rate_illegal +
210 falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
211
212 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
213 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
214 falsealm_cnt->cnt_cck_fail = ret_value;
215
216 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
217 falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
218 falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
219 falsealm_cnt->cnt_rate_illegal +
220 falsealm_cnt->cnt_crc8_fail +
221 falsealm_cnt->cnt_mcs_fail +
222 falsealm_cnt->cnt_cck_fail);
223
224 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
225 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
226 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
227 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
228
229 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
230 ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
231 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
232 falsealm_cnt->cnt_parity_fail,
233 falsealm_cnt->cnt_rate_illegal,
234 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
235
236 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
237 ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
238 falsealm_cnt->cnt_ofdm_fail,
239 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
240}
241
242static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
243{
244 struct rtl_priv *rtlpriv = rtl_priv(hw);
245 u8 value_igi = dm_digtable.cur_igvalue;
246
247 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
248 value_igi--;
249 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
250 value_igi += 0;
251 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
252 value_igi++;
253 else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
254 value_igi += 2;
255 if (value_igi > DM_DIG_FA_UPPER)
256 value_igi = DM_DIG_FA_UPPER;
257 else if (value_igi < DM_DIG_FA_LOWER)
258 value_igi = DM_DIG_FA_LOWER;
259 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
260 value_igi = 0x32;
261
262 dm_digtable.cur_igvalue = value_igi;
263 rtl92c_dm_write_dig(hw);
264}
265
266static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
267{
268 struct rtl_priv *rtlpriv = rtl_priv(hw);
269
270 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
271 if ((dm_digtable.backoff_val - 2) <
272 dm_digtable.backoff_val_range_min)
273 dm_digtable.backoff_val =
274 dm_digtable.backoff_val_range_min;
275 else
276 dm_digtable.backoff_val -= 2;
277 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
278 if ((dm_digtable.backoff_val + 2) >
279 dm_digtable.backoff_val_range_max)
280 dm_digtable.backoff_val =
281 dm_digtable.backoff_val_range_max;
282 else
283 dm_digtable.backoff_val += 2;
284 }
285
286 if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
287 dm_digtable.rx_gain_range_max)
288 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
289 else if ((dm_digtable.rssi_val_min + 10 -
290 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
291 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
292 else
293 dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
294 dm_digtable.backoff_val;
295
296 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
297 ("rssi_val_min = %x backoff_val %x\n",
298 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
299
300 rtl92c_dm_write_dig(hw);
301}
302
303static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
304{
305 static u8 binitialized; /* initialized to false */
306 struct rtl_priv *rtlpriv = rtl_priv(hw);
307 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
308 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
309 bool multi_sta = false;
310
311 if (mac->opmode == NL80211_IFTYPE_ADHOC)
312 multi_sta = true;
313
314 if ((multi_sta == false) || (dm_digtable.cursta_connectctate !=
315 DIG_STA_DISCONNECT)) {
316 binitialized = false;
317 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
318 return;
319 } else if (binitialized == false) {
320 binitialized = true;
321 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
322 dm_digtable.cur_igvalue = 0x20;
323 rtl92c_dm_write_dig(hw);
324 }
325
326 if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
327 if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
328 (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
329
330 if (dm_digtable.dig_ext_port_stage ==
331 DIG_EXT_PORT_STAGE_2) {
332 dm_digtable.cur_igvalue = 0x20;
333 rtl92c_dm_write_dig(hw);
334 }
335
336 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
337 } else if (rssi_strength > dm_digtable.rssi_highthresh) {
338 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
339 rtl92c_dm_ctrl_initgain_by_fa(hw);
340 }
341 } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
342 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
343 dm_digtable.cur_igvalue = 0x20;
344 rtl92c_dm_write_dig(hw);
345 }
346
347 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
348 ("curmultista_connectstate = "
349 "%x dig_ext_port_stage %x\n",
350 dm_digtable.curmultista_connectstate,
351 dm_digtable.dig_ext_port_stage));
352}
353
354static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
355{
356 struct rtl_priv *rtlpriv = rtl_priv(hw);
357
358 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
359 ("presta_connectstate = %x,"
360 " cursta_connectctate = %x\n",
361 dm_digtable.presta_connectstate,
362 dm_digtable.cursta_connectctate));
363
364 if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
365 || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
366 || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
367
368 if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
369 dm_digtable.rssi_val_min =
370 rtl92c_dm_initial_gain_min_pwdb(hw);
371 rtl92c_dm_ctrl_initgain_by_rssi(hw);
372 }
373 } else {
374 dm_digtable.rssi_val_min = 0;
375 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
376 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
377 dm_digtable.cur_igvalue = 0x20;
378 dm_digtable.pre_igvalue = 0;
379 rtl92c_dm_write_dig(hw);
380 }
381}
382
383static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
384{
385 struct rtl_priv *rtlpriv = rtl_priv(hw);
386 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
387
388 if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
389 dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
390
391 if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
392 if (dm_digtable.rssi_val_min <= 25)
393 dm_digtable.cur_cck_pd_state =
394 CCK_PD_STAGE_LowRssi;
395 else
396 dm_digtable.cur_cck_pd_state =
397 CCK_PD_STAGE_HighRssi;
398 } else {
399 if (dm_digtable.rssi_val_min <= 20)
400 dm_digtable.cur_cck_pd_state =
401 CCK_PD_STAGE_LowRssi;
402 else
403 dm_digtable.cur_cck_pd_state =
404 CCK_PD_STAGE_HighRssi;
405 }
406 } else {
407 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
408 }
409
410 if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
411 if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
412 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
413 dm_digtable.cur_cck_fa_state =
414 CCK_FA_STAGE_High;
415 else
416 dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
417
418 if (dm_digtable.pre_cck_fa_state !=
419 dm_digtable.cur_cck_fa_state) {
420 if (dm_digtable.cur_cck_fa_state ==
421 CCK_FA_STAGE_Low)
422 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
423 0x83);
424 else
425 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
426 0xcd);
427
428 dm_digtable.pre_cck_fa_state =
429 dm_digtable.cur_cck_fa_state;
430 }
431
432 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
433
434 if (IS_92C_SERIAL(rtlhal->version))
435 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
436 MASKBYTE2, 0xd7);
437 } else {
438 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
439 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
440
441 if (IS_92C_SERIAL(rtlhal->version))
442 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
443 MASKBYTE2, 0xd3);
444 }
445 dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
446 }
447
448 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
449 ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
450
451 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
452 ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
453}
454
455static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
456{
457 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
458
459 if (mac->act_scanning == true)
460 return;
461
462 if ((mac->link_state > MAC80211_NOLINK) &&
463 (mac->link_state < MAC80211_LINKED))
464 dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
465 else if (mac->link_state >= MAC80211_LINKED)
466 dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
467 else
468 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
469
470 rtl92c_dm_initial_gain_sta(hw);
471 rtl92c_dm_initial_gain_multi_sta(hw);
472 rtl92c_dm_cck_packet_detection_thresh(hw);
473
474 dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
475
476}
477
478static void rtl92c_dm_dig(struct ieee80211_hw *hw)
479{
480 struct rtl_priv *rtlpriv = rtl_priv(hw);
481
482 if (rtlpriv->dm.dm_initialgain_enable == false)
483 return;
484 if (dm_digtable.dig_enable_flag == false)
485 return;
486
487 rtl92c_dm_ctrl_initgain_by_twoport(hw);
488
489}
490
491static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
492{
493 struct rtl_priv *rtlpriv = rtl_priv(hw);
494
495 rtlpriv->dm.dynamic_txpower_enable = false;
496
497 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
498 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
499}
500
501void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
502{
503 struct rtl_priv *rtlpriv = rtl_priv(hw);
504
505 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
506 ("cur_igvalue = 0x%x, "
507 "pre_igvalue = 0x%x, backoff_val = %d\n",
508 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
509 dm_digtable.backoff_val));
510
511 if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
512 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
513 dm_digtable.cur_igvalue);
514 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
515 dm_digtable.cur_igvalue);
516
517 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
518 }
519}
520
521static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
522{
523 struct rtl_priv *rtlpriv = rtl_priv(hw);
524 long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
525
526 u8 h2c_parameter[3] = { 0 };
527
528 return;
529
530 if (tmpentry_max_pwdb != 0) {
531 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
532 tmpentry_max_pwdb;
533 } else {
534 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
535 }
536
537 if (tmpentry_min_pwdb != 0xff) {
538 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
539 tmpentry_min_pwdb;
540 } else {
541 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
542 }
543
544 h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
545 h2c_parameter[0] = 0;
546
547 rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
548}
549
550void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
551{
552 struct rtl_priv *rtlpriv = rtl_priv(hw);
553 rtlpriv->dm.current_turbo_edca = false;
554 rtlpriv->dm.is_any_nonbepkts = false;
555 rtlpriv->dm.is_cur_rdlstate = false;
556}
557
558static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
559{
560 struct rtl_priv *rtlpriv = rtl_priv(hw);
561 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
562 static u64 last_txok_cnt;
563 static u64 last_rxok_cnt;
564 u64 cur_txok_cnt;
565 u64 cur_rxok_cnt;
566 u32 edca_be_ul = 0x5ea42b;
567 u32 edca_be_dl = 0x5ea42b;
568
569 if (mac->opmode == NL80211_IFTYPE_ADHOC)
570 goto dm_checkedcaturbo_exit;
571
572 if (mac->link_state != MAC80211_LINKED) {
573 rtlpriv->dm.current_turbo_edca = false;
574 return;
575 }
576
577 if (!mac->ht_enable) { /*FIX MERGE */
578 if (!(edca_be_ul & 0xffff0000))
579 edca_be_ul |= 0x005e0000;
580
581 if (!(edca_be_dl & 0xffff0000))
582 edca_be_dl |= 0x005e0000;
583 }
584
585 if ((!rtlpriv->dm.is_any_nonbepkts) &&
586 (!rtlpriv->dm.disable_framebursting)) {
587 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
588 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
589 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
590 if (!rtlpriv->dm.is_cur_rdlstate ||
591 !rtlpriv->dm.current_turbo_edca) {
592 rtl_write_dword(rtlpriv,
593 REG_EDCA_BE_PARAM,
594 edca_be_dl);
595 rtlpriv->dm.is_cur_rdlstate = true;
596 }
597 } else {
598 if (rtlpriv->dm.is_cur_rdlstate ||
599 !rtlpriv->dm.current_turbo_edca) {
600 rtl_write_dword(rtlpriv,
601 REG_EDCA_BE_PARAM,
602 edca_be_ul);
603 rtlpriv->dm.is_cur_rdlstate = false;
604 }
605 }
606 rtlpriv->dm.current_turbo_edca = true;
607 } else {
608 if (rtlpriv->dm.current_turbo_edca) {
609 u8 tmp = AC0_BE;
610 rtlpriv->cfg->ops->set_hw_reg(hw,
611 HW_VAR_AC_PARAM,
612 (u8 *) (&tmp));
613 rtlpriv->dm.current_turbo_edca = false;
614 }
615 }
616
617dm_checkedcaturbo_exit:
618 rtlpriv->dm.is_any_nonbepkts = false;
619 last_txok_cnt = rtlpriv->stats.txbytesunicast;
620 last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
621}
622
623static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
624 *hw)
625{
626 struct rtl_priv *rtlpriv = rtl_priv(hw);
627 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
628 struct rtl_phy *rtlphy = &(rtlpriv->phy);
629 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
630 u8 thermalvalue, delta, delta_lck, delta_iqk;
631 long ele_a, ele_d, temp_cck, val_x, value32;
632 long val_y, ele_c;
633 u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
634 int i;
635 bool is2t = IS_92C_SERIAL(rtlhal->version);
636 u8 txpwr_level[2] = {0, 0};
637 u8 ofdm_min_index = 6, rf;
638
639 rtlpriv->dm.txpower_trackingInit = true;
640 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
641 ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
642
643 thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
644
645 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
646 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
647 "eeprom_thermalmeter 0x%x\n",
648 thermalvalue, rtlpriv->dm.thermalvalue,
649 rtlefuse->eeprom_thermalmeter));
650
651 rtl92c_phy_ap_calibrate(hw, (thermalvalue -
652 rtlefuse->eeprom_thermalmeter));
653 if (is2t)
654 rf = 2;
655 else
656 rf = 1;
657
658 if (thermalvalue) {
659 ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
660 MASKDWORD) & MASKOFDM_D;
661
662 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
663 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
664 ofdm_index_old[0] = (u8) i;
665
666 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
667 ("Initial pathA ele_d reg0x%x = 0x%lx, "
668 "ofdm_index=0x%x\n",
669 ROFDM0_XATXIQIMBALANCE,
670 ele_d, ofdm_index_old[0]));
671 break;
672 }
673 }
674
675 if (is2t) {
676 ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
677 MASKDWORD) & MASKOFDM_D;
678
679 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
680 if (ele_d == (ofdmswing_table[i] &
681 MASKOFDM_D)) {
682 ofdm_index_old[1] = (u8) i;
683
684 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
685 DBG_LOUD,
686 ("Initial pathB ele_d reg0x%x = "
687 "0x%lx, ofdm_index=0x%x\n",
688 ROFDM0_XBTXIQIMBALANCE, ele_d,
689 ofdm_index_old[1]));
690 break;
691 }
692 }
693 }
694
695 temp_cck =
696 rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
697
698 for (i = 0; i < CCK_TABLE_LENGTH; i++) {
699 if (rtlpriv->dm.cck_inch14) {
700 if (memcmp((void *)&temp_cck,
701 (void *)&cckswing_table_ch14[i][2],
702 4) == 0) {
703 cck_index_old = (u8) i;
704
705 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
706 DBG_LOUD,
707 ("Initial reg0x%x = 0x%lx, "
708 "cck_index=0x%x, ch 14 %d\n",
709 RCCK0_TXFILTER2, temp_cck,
710 cck_index_old,
711 rtlpriv->dm.cck_inch14));
712 break;
713 }
714 } else {
715 if (memcmp((void *)&temp_cck,
716 (void *)
717 &cckswing_table_ch1ch13[i][2],
718 4) == 0) {
719 cck_index_old = (u8) i;
720
721 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
722 DBG_LOUD,
723 ("Initial reg0x%x = 0x%lx, "
724 "cck_index=0x%x, ch14 %d\n",
725 RCCK0_TXFILTER2, temp_cck,
726 cck_index_old,
727 rtlpriv->dm.cck_inch14));
728 break;
729 }
730 }
731 }
732
733 if (!rtlpriv->dm.thermalvalue) {
734 rtlpriv->dm.thermalvalue =
735 rtlefuse->eeprom_thermalmeter;
736 rtlpriv->dm.thermalvalue_lck = thermalvalue;
737 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
738 for (i = 0; i < rf; i++)
739 rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
740 rtlpriv->dm.cck_index = cck_index_old;
741 }
742
743 delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
744 (thermalvalue - rtlpriv->dm.thermalvalue) :
745 (rtlpriv->dm.thermalvalue - thermalvalue);
746
747 delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
748 (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
749 (rtlpriv->dm.thermalvalue_lck - thermalvalue);
750
751 delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
752 (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
753 (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
754
755 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
756 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
757 "eeprom_thermalmeter 0x%x delta 0x%x "
758 "delta_lck 0x%x delta_iqk 0x%x\n",
759 thermalvalue, rtlpriv->dm.thermalvalue,
760 rtlefuse->eeprom_thermalmeter, delta, delta_lck,
761 delta_iqk));
762
763 if (delta_lck > 1) {
764 rtlpriv->dm.thermalvalue_lck = thermalvalue;
765 rtl92c_phy_lc_calibrate(hw);
766 }
767
768 if (delta > 0 && rtlpriv->dm.txpower_track_control) {
769 if (thermalvalue > rtlpriv->dm.thermalvalue) {
770 for (i = 0; i < rf; i++)
771 rtlpriv->dm.ofdm_index[i] -= delta;
772 rtlpriv->dm.cck_index -= delta;
773 } else {
774 for (i = 0; i < rf; i++)
775 rtlpriv->dm.ofdm_index[i] += delta;
776 rtlpriv->dm.cck_index += delta;
777 }
778
779 if (is2t) {
780 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
781 ("temp OFDM_A_index=0x%x, "
782 "OFDM_B_index=0x%x,"
783 "cck_index=0x%x\n",
784 rtlpriv->dm.ofdm_index[0],
785 rtlpriv->dm.ofdm_index[1],
786 rtlpriv->dm.cck_index));
787 } else {
788 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
789 ("temp OFDM_A_index=0x%x,"
790 "cck_index=0x%x\n",
791 rtlpriv->dm.ofdm_index[0],
792 rtlpriv->dm.cck_index));
793 }
794
795 if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
796 for (i = 0; i < rf; i++)
797 ofdm_index[i] =
798 rtlpriv->dm.ofdm_index[i]
799 + 1;
800 cck_index = rtlpriv->dm.cck_index + 1;
801 } else {
802 for (i = 0; i < rf; i++)
803 ofdm_index[i] =
804 rtlpriv->dm.ofdm_index[i];
805 cck_index = rtlpriv->dm.cck_index;
806 }
807
808 for (i = 0; i < rf; i++) {
809 if (txpwr_level[i] >= 0 &&
810 txpwr_level[i] <= 26) {
811 if (thermalvalue >
812 rtlefuse->eeprom_thermalmeter) {
813 if (delta < 5)
814 ofdm_index[i] -= 1;
815
816 else
817 ofdm_index[i] -= 2;
818 } else if (delta > 5 && thermalvalue <
819 rtlefuse->
820 eeprom_thermalmeter) {
821 ofdm_index[i] += 1;
822 }
823 } else if (txpwr_level[i] >= 27 &&
824 txpwr_level[i] <= 32
825 && thermalvalue >
826 rtlefuse->eeprom_thermalmeter) {
827 if (delta < 5)
828 ofdm_index[i] -= 1;
829
830 else
831 ofdm_index[i] -= 2;
832 } else if (txpwr_level[i] >= 32 &&
833 txpwr_level[i] <= 38 &&
834 thermalvalue >
835 rtlefuse->eeprom_thermalmeter
836 && delta > 5) {
837 ofdm_index[i] -= 1;
838 }
839 }
840
841 if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
842 if (thermalvalue >
843 rtlefuse->eeprom_thermalmeter) {
844 if (delta < 5)
845 cck_index -= 1;
846
847 else
848 cck_index -= 2;
849 } else if (delta > 5 && thermalvalue <
850 rtlefuse->eeprom_thermalmeter) {
851 cck_index += 1;
852 }
853 } else if (txpwr_level[i] >= 27 &&
854 txpwr_level[i] <= 32 &&
855 thermalvalue >
856 rtlefuse->eeprom_thermalmeter) {
857 if (delta < 5)
858 cck_index -= 1;
859
860 else
861 cck_index -= 2;
862 } else if (txpwr_level[i] >= 32 &&
863 txpwr_level[i] <= 38 &&
864 thermalvalue > rtlefuse->eeprom_thermalmeter
865 && delta > 5) {
866 cck_index -= 1;
867 }
868
869 for (i = 0; i < rf; i++) {
870 if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
871 ofdm_index[i] = OFDM_TABLE_SIZE - 1;
872
873 else if (ofdm_index[i] < ofdm_min_index)
874 ofdm_index[i] = ofdm_min_index;
875 }
876
877 if (cck_index > CCK_TABLE_SIZE - 1)
878 cck_index = CCK_TABLE_SIZE - 1;
879 else if (cck_index < 0)
880 cck_index = 0;
881
882 if (is2t) {
883 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
884 ("new OFDM_A_index=0x%x, "
885 "OFDM_B_index=0x%x,"
886 "cck_index=0x%x\n",
887 ofdm_index[0], ofdm_index[1],
888 cck_index));
889 } else {
890 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
891 ("new OFDM_A_index=0x%x,"
892 "cck_index=0x%x\n",
893 ofdm_index[0], cck_index));
894 }
895 }
896
897 if (rtlpriv->dm.txpower_track_control && delta != 0) {
898 ele_d =
899 (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
900 val_x = rtlphy->reg_e94;
901 val_y = rtlphy->reg_e9c;
902
903 if (val_x != 0) {
904 if ((val_x & 0x00000200) != 0)
905 val_x = val_x | 0xFFFFFC00;
906 ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
907
908 if ((val_y & 0x00000200) != 0)
909 val_y = val_y | 0xFFFFFC00;
910 ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
911
912 value32 = (ele_d << 22) |
913 ((ele_c & 0x3F) << 16) | ele_a;
914
915 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
916 MASKDWORD, value32);
917
918 value32 = (ele_c & 0x000003C0) >> 6;
919 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
920 value32);
921
922 value32 = ((val_x * ele_d) >> 7) & 0x01;
923 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
924 BIT(31), value32);
925
926 value32 = ((val_y * ele_d) >> 7) & 0x01;
927 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
928 BIT(29), value32);
929 } else {
930 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
931 MASKDWORD,
932 ofdmswing_table[ofdm_index[0]]);
933
934 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
935 0x00);
936 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
937 BIT(31) | BIT(29), 0x00);
938 }
939
940 if (!rtlpriv->dm.cck_inch14) {
941 rtl_write_byte(rtlpriv, 0xa22,
942 cckswing_table_ch1ch13[cck_index]
943 [0]);
944 rtl_write_byte(rtlpriv, 0xa23,
945 cckswing_table_ch1ch13[cck_index]
946 [1]);
947 rtl_write_byte(rtlpriv, 0xa24,
948 cckswing_table_ch1ch13[cck_index]
949 [2]);
950 rtl_write_byte(rtlpriv, 0xa25,
951 cckswing_table_ch1ch13[cck_index]
952 [3]);
953 rtl_write_byte(rtlpriv, 0xa26,
954 cckswing_table_ch1ch13[cck_index]
955 [4]);
956 rtl_write_byte(rtlpriv, 0xa27,
957 cckswing_table_ch1ch13[cck_index]
958 [5]);
959 rtl_write_byte(rtlpriv, 0xa28,
960 cckswing_table_ch1ch13[cck_index]
961 [6]);
962 rtl_write_byte(rtlpriv, 0xa29,
963 cckswing_table_ch1ch13[cck_index]
964 [7]);
965 } else {
966 rtl_write_byte(rtlpriv, 0xa22,
967 cckswing_table_ch14[cck_index]
968 [0]);
969 rtl_write_byte(rtlpriv, 0xa23,
970 cckswing_table_ch14[cck_index]
971 [1]);
972 rtl_write_byte(rtlpriv, 0xa24,
973 cckswing_table_ch14[cck_index]
974 [2]);
975 rtl_write_byte(rtlpriv, 0xa25,
976 cckswing_table_ch14[cck_index]
977 [3]);
978 rtl_write_byte(rtlpriv, 0xa26,
979 cckswing_table_ch14[cck_index]
980 [4]);
981 rtl_write_byte(rtlpriv, 0xa27,
982 cckswing_table_ch14[cck_index]
983 [5]);
984 rtl_write_byte(rtlpriv, 0xa28,
985 cckswing_table_ch14[cck_index]
986 [6]);
987 rtl_write_byte(rtlpriv, 0xa29,
988 cckswing_table_ch14[cck_index]
989 [7]);
990 }
991
992 if (is2t) {
993 ele_d = (ofdmswing_table[ofdm_index[1]] &
994 0xFFC00000) >> 22;
995
996 val_x = rtlphy->reg_eb4;
997 val_y = rtlphy->reg_ebc;
998
999 if (val_x != 0) {
1000 if ((val_x & 0x00000200) != 0)
1001 val_x = val_x | 0xFFFFFC00;
1002 ele_a = ((val_x * ele_d) >> 8) &
1003 0x000003FF;
1004
1005 if ((val_y & 0x00000200) != 0)
1006 val_y = val_y | 0xFFFFFC00;
1007 ele_c = ((val_y * ele_d) >> 8) &
1008 0x00003FF;
1009
1010 value32 = (ele_d << 22) |
1011 ((ele_c & 0x3F) << 16) | ele_a;
1012 rtl_set_bbreg(hw,
1013 ROFDM0_XBTXIQIMBALANCE,
1014 MASKDWORD, value32);
1015
1016 value32 = (ele_c & 0x000003C0) >> 6;
1017 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1018 MASKH4BITS, value32);
1019
1020 value32 = ((val_x * ele_d) >> 7) & 0x01;
1021 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1022 BIT(27), value32);
1023
1024 value32 = ((val_y * ele_d) >> 7) & 0x01;
1025 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1026 BIT(25), value32);
1027 } else {
1028 rtl_set_bbreg(hw,
1029 ROFDM0_XBTXIQIMBALANCE,
1030 MASKDWORD,
1031 ofdmswing_table[ofdm_index
1032 [1]]);
1033 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1034 MASKH4BITS, 0x00);
1035 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1036 BIT(27) | BIT(25), 0x00);
1037 }
1038
1039 }
1040 }
1041
1042 if (delta_iqk > 3) {
1043 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
1044 rtl92c_phy_iq_calibrate(hw, false);
1045 }
1046
1047 if (rtlpriv->dm.txpower_track_control)
1048 rtlpriv->dm.thermalvalue = thermalvalue;
1049 }
1050
1051 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
1052
1053}
1054
1055static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
1056 struct ieee80211_hw *hw)
1057{
1058 struct rtl_priv *rtlpriv = rtl_priv(hw);
1059
1060 rtlpriv->dm.txpower_tracking = true;
1061 rtlpriv->dm.txpower_trackingInit = false;
1062
1063 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1064 ("pMgntInfo->txpower_tracking = %d\n",
1065 rtlpriv->dm.txpower_tracking));
1066}
1067
1068static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
1069{
1070 rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
1071}
1072
1073static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
1074{
1075 rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
1076}
1077
1078static void rtl92c_dm_check_txpower_tracking_thermal_meter(
1079 struct ieee80211_hw *hw)
1080{
1081 struct rtl_priv *rtlpriv = rtl_priv(hw);
1082 static u8 tm_trigger;
1083
1084 if (!rtlpriv->dm.txpower_tracking)
1085 return;
1086
1087 if (!tm_trigger) {
1088 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
1089 0x60);
1090 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1091 ("Trigger 92S Thermal Meter!!\n"));
1092 tm_trigger = 1;
1093 return;
1094 } else {
1095 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1096 ("Schedule TxPowerTracking direct call!!\n"));
1097 rtl92c_dm_txpower_tracking_directcall(hw);
1098 tm_trigger = 0;
1099 }
1100}
1101
1102void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
1103{
1104 rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
1105}
1106
1107void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1108{
1109 struct rtl_priv *rtlpriv = rtl_priv(hw);
1110 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1111
1112 p_ra->ratr_state = DM_RATR_STA_INIT;
1113 p_ra->pre_ratr_state = DM_RATR_STA_INIT;
1114
1115 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
1116 rtlpriv->dm.useramask = true;
1117 else
1118 rtlpriv->dm.useramask = false;
1119
1120}
1121
1122static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1123{
1124 struct rtl_priv *rtlpriv = rtl_priv(hw);
1125 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1126 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1127 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1128 u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
1129
1130 if (is_hal_stop(rtlhal)) {
1131 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1132 ("<---- driver is going to unload\n"));
1133 return;
1134 }
1135
1136 if (!rtlpriv->dm.useramask) {
1137 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1138 ("<---- driver does not control rate adaptive mask\n"));
1139 return;
1140 }
1141
1142 if (mac->link_state == MAC80211_LINKED) {
1143
1144 switch (p_ra->pre_ratr_state) {
1145 case DM_RATR_STA_HIGH:
1146 high_rssithresh_for_ra = 50;
1147 low_rssithresh_for_ra = 20;
1148 break;
1149 case DM_RATR_STA_MIDDLE:
1150 high_rssithresh_for_ra = 55;
1151 low_rssithresh_for_ra = 20;
1152 break;
1153 case DM_RATR_STA_LOW:
1154 high_rssithresh_for_ra = 50;
1155 low_rssithresh_for_ra = 25;
1156 break;
1157 default:
1158 high_rssithresh_for_ra = 50;
1159 low_rssithresh_for_ra = 20;
1160 break;
1161 }
1162
1163 if (rtlpriv->dm.undecorated_smoothed_pwdb >
1164 (long)high_rssithresh_for_ra)
1165 p_ra->ratr_state = DM_RATR_STA_HIGH;
1166 else if (rtlpriv->dm.undecorated_smoothed_pwdb >
1167 (long)low_rssithresh_for_ra)
1168 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
1169 else
1170 p_ra->ratr_state = DM_RATR_STA_LOW;
1171
1172 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
1173 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1174 ("RSSI = %ld\n",
1175 rtlpriv->dm.undecorated_smoothed_pwdb));
1176 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1177 ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
1178 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1179 ("PreState = %d, CurState = %d\n",
1180 p_ra->pre_ratr_state, p_ra->ratr_state));
1181
1182 rtlpriv->cfg->ops->update_rate_mask(hw,
1183 p_ra->ratr_state);
1184
1185 p_ra->pre_ratr_state = p_ra->ratr_state;
1186 }
1187 }
1188}
1189
1190static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1191{
1192 dm_pstable.pre_ccastate = CCA_MAX;
1193 dm_pstable.cur_ccasate = CCA_MAX;
1194 dm_pstable.pre_rfstate = RF_MAX;
1195 dm_pstable.cur_rfstate = RF_MAX;
1196 dm_pstable.rssi_val_min = 0;
1197}
1198
1199static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
1200{
1201 struct rtl_priv *rtlpriv = rtl_priv(hw);
1202 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1203
1204 if (dm_pstable.rssi_val_min != 0) {
1205 if (dm_pstable.pre_ccastate == CCA_2R) {
1206 if (dm_pstable.rssi_val_min >= 35)
1207 dm_pstable.cur_ccasate = CCA_1R;
1208 else
1209 dm_pstable.cur_ccasate = CCA_2R;
1210 } else {
1211 if (dm_pstable.rssi_val_min <= 30)
1212 dm_pstable.cur_ccasate = CCA_2R;
1213 else
1214 dm_pstable.cur_ccasate = CCA_1R;
1215 }
1216 } else {
1217 dm_pstable.cur_ccasate = CCA_MAX;
1218 }
1219
1220 if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
1221 if (dm_pstable.cur_ccasate == CCA_1R) {
1222 if (get_rf_type(rtlphy) == RF_2T2R) {
1223 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1224 MASKBYTE0, 0x13);
1225 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
1226 } else {
1227 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1228 MASKBYTE0, 0x23);
1229 rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
1230 }
1231 } else {
1232 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
1233 0x33);
1234 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
1235 }
1236 dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
1237 }
1238
1239 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
1240 (dm_pstable.cur_ccasate ==
1241 0) ? "1RCCA" : "2RCCA"));
1242}
1243
1244void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1245{
1246 static u8 initialize;
1247 static u32 reg_874, reg_c70, reg_85c, reg_a74;
1248
1249 if (initialize == 0) {
1250 reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1251 MASKDWORD) & 0x1CC000) >> 14;
1252
1253 reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
1254 MASKDWORD) & BIT(3)) >> 3;
1255
1256 reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1257 MASKDWORD) & 0xFF000000) >> 24;
1258
1259 reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
1260
1261 initialize = 1;
1262 }
1263
1264 if (!bforce_in_normal) {
1265 if (dm_pstable.rssi_val_min != 0) {
1266 if (dm_pstable.pre_rfstate == RF_NORMAL) {
1267 if (dm_pstable.rssi_val_min >= 30)
1268 dm_pstable.cur_rfstate = RF_SAVE;
1269 else
1270 dm_pstable.cur_rfstate = RF_NORMAL;
1271 } else {
1272 if (dm_pstable.rssi_val_min <= 25)
1273 dm_pstable.cur_rfstate = RF_NORMAL;
1274 else
1275 dm_pstable.cur_rfstate = RF_SAVE;
1276 }
1277 } else {
1278 dm_pstable.cur_rfstate = RF_MAX;
1279 }
1280 } else {
1281 dm_pstable.cur_rfstate = RF_NORMAL;
1282 }
1283
1284 if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
1285 if (dm_pstable.cur_rfstate == RF_SAVE) {
1286 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1287 0x1C0000, 0x2);
1288 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
1289 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1290 0xFF000000, 0x63);
1291 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1292 0xC000, 0x2);
1293 rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
1294 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1295 rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
1296 } else {
1297 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1298 0x1CC000, reg_874);
1299 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
1300 reg_c70);
1301 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
1302 reg_85c);
1303 rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
1304 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1305 }
1306
1307 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
1308 }
1309}
1310
1311static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1312{
1313 struct rtl_priv *rtlpriv = rtl_priv(hw);
1314 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1315 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1316
1317 if (((mac->link_state == MAC80211_NOLINK)) &&
1318 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
1319 dm_pstable.rssi_val_min = 0;
1320		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
1321			 ("Not connected to any AP\n"));
1322 }
1323
1324 if (mac->link_state == MAC80211_LINKED) {
1325 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1326 dm_pstable.rssi_val_min =
1327 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1328			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
1329				 ("AP Client PWDB = 0x%lx\n",
1330				  dm_pstable.rssi_val_min));
1331 } else {
1332 dm_pstable.rssi_val_min =
1333 rtlpriv->dm.undecorated_smoothed_pwdb;
1334			RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
1335				 ("STA Default Port PWDB = 0x%lx\n",
1336				  dm_pstable.rssi_val_min));
1337 }
1338 } else {
1339 dm_pstable.rssi_val_min =
1340 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1341
1342		RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
1343			 ("AP Ext Port PWDB = 0x%lx\n",
1344			  dm_pstable.rssi_val_min));
1345 }
1346
1347 if (IS_92C_SERIAL(rtlhal->version))
1348 rtl92c_dm_1r_cca(hw);
1349}
1350
1351void rtl92c_dm_init(struct ieee80211_hw *hw)
1352{
1353 struct rtl_priv *rtlpriv = rtl_priv(hw);
1354
1355 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
1356 rtl92c_dm_diginit(hw);
1357 rtl92c_dm_init_dynamic_txpower(hw);
1358 rtl92c_dm_init_edca_turbo(hw);
1359 rtl92c_dm_init_rate_adaptive_mask(hw);
1360 rtl92c_dm_initialize_txpower_tracking(hw);
1361 rtl92c_dm_init_dynamic_bb_powersaving(hw);
1362}
1363
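/*
 * Periodic dynamic-mechanism worker: the individual DM routines only
 * run while the RF is on, firmware power save is inactive and no RF
 * state change is in progress.
 */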
1364void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1365{
1366 struct rtl_priv *rtlpriv = rtl_priv(hw);
1367 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1368 bool fw_current_inpsmode = false;
1369 bool fw_ps_awake = true;
1370
1371 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
1372 (u8 *) (&fw_current_inpsmode));
1373 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
1374 (u8 *) (&fw_ps_awake));
1375
1376 if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
1377 fw_ps_awake)
1378 && (!ppsc->rfchange_inprogress)) {
1379 rtl92c_dm_pwdb_monitor(hw);
1380 rtl92c_dm_dig(hw);
1381 rtl92c_dm_false_alarm_counter_statistics(hw);
1382 rtl92c_dm_dynamic_bb_powersaving(hw);
1383 rtl92c_dm_dynamic_txpower(hw);
1384 rtl92c_dm_check_txpower_tracking(hw);
1385 rtl92c_dm_refresh_rate_adaptive_mask(hw);
1386 rtl92c_dm_check_edca_turbo(hw);
1387 }
1388}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
new file mode 100644
index 00000000000..3728abc4df5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -0,0 +1,2049 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30/* Define macro to shorten lines */
31#define MCS_TXPWR mcs_txpwrlevel_origoffset
32
33static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
34 enum radio_path rfpath, u32 offset);
35static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
36 enum radio_path rfpath, u32 offset,
37 u32 data);
38static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
39 enum radio_path rfpath, u32 offset);
40static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
41 enum radio_path rfpath, u32 offset,
42 u32 data);
43static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
44static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
45static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
46static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
47 u8 configtype);
48static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
49 u8 configtype);
50static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
51static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
52 u32 cmdtableidx, u32 cmdtablesz,
53 enum swchnlcmd_id cmdid, u32 para1,
54 u32 para2, u32 msdelay);
55static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
56 u8 channel, u8 *stage, u8 *step,
57 u32 *delay);
58static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
59 enum wireless_mode wirelessmode,
60 long power_indbm);
61static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
62 enum radio_path rfpath);
63static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
64 enum wireless_mode wirelessmode,
65 u8 txpwridx);
66static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
67
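/*
 * Generic BB register field accessors: a field is identified by a
 * dword address plus a bitmask, and _rtl92c_phy_calculate_bit_shift()
 * turns the mask into the shift needed to right-align the field.
 */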
68u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
69{
70 struct rtl_priv *rtlpriv = rtl_priv(hw);
71 u32 returnvalue, originalvalue, bitshift;
72
73 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
74 "bitmask(%#x)\n", regaddr,
75 bitmask));
76 originalvalue = rtl_read_dword(rtlpriv, regaddr);
77 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
78 returnvalue = (originalvalue & bitmask) >> bitshift;
79
80 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
81 "Addr[0x%x]=0x%x\n", bitmask,
82 regaddr, originalvalue));
83
84 return returnvalue;
85
86}
87
88void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
89 u32 regaddr, u32 bitmask, u32 data)
90{
91 struct rtl_priv *rtlpriv = rtl_priv(hw);
92 u32 originalvalue, bitshift;
93
94 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
95 " data(%#x)\n", regaddr, bitmask,
96 data));
97
98 if (bitmask != MASKDWORD) {
99 originalvalue = rtl_read_dword(rtlpriv, regaddr);
100 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
101 data = ((originalvalue & (~bitmask)) | (data << bitshift));
102 }
103
104 rtl_write_dword(rtlpriv, regaddr, data);
105
106 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
107 " data(%#x)\n", regaddr, bitmask,
108 data));
109
110}
111
112static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
113 enum radio_path rfpath, u32 offset)
114{
115 RT_ASSERT(false, ("deprecated!\n"));
116 return 0;
117}
118
119static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
120 enum radio_path rfpath, u32 offset,
121 u32 data)
122{
123 RT_ASSERT(false, ("deprecated!\n"));
124}
125
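/*
 * Read an RF register through the 3-wire (LSSI) interface: load the
 * offset into HSSI parameter 2, pulse the read edge with short delays,
 * then fetch the value from the PI or SI readback register depending
 * on bit 8 of HSSI parameter 1.
 */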
126static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
127 enum radio_path rfpath, u32 offset)
128{
129 struct rtl_priv *rtlpriv = rtl_priv(hw);
130 struct rtl_phy *rtlphy = &(rtlpriv->phy);
131 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
132 u32 newoffset;
133 u32 tmplong, tmplong2;
134 u8 rfpi_enable = 0;
135 u32 retvalue;
136
137 offset &= 0x3f;
138 newoffset = offset;
139 if (RT_CANNOT_IO(hw)) {
140 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
141 return 0xFFFFFFFF;
142 }
143 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
144 if (rfpath == RF90_PATH_A)
145 tmplong2 = tmplong;
146 else
147 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
148 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
149 (newoffset << 23) | BLSSIREADEDGE;
150 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
151 tmplong & (~BLSSIREADEDGE));
152 mdelay(1);
153 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
154 mdelay(1);
155 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
156 tmplong | BLSSIREADEDGE);
157 mdelay(1);
158 if (rfpath == RF90_PATH_A)
159 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
160 BIT(8));
161 else if (rfpath == RF90_PATH_B)
162 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
163 BIT(8));
164 if (rfpi_enable)
165 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
166 BLSSIREADBACKDATA);
167 else
168 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
169 BLSSIREADBACKDATA);
170 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
171 rfpath, pphyreg->rflssi_readback,
172 retvalue));
173 return retvalue;
174}
175
176static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
177 enum radio_path rfpath, u32 offset,
178 u32 data)
179{
180 u32 data_and_addr;
181 u32 newoffset;
182 struct rtl_priv *rtlpriv = rtl_priv(hw);
183 struct rtl_phy *rtlphy = &(rtlpriv->phy);
184 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
185
186 if (RT_CANNOT_IO(hw)) {
187 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
188 return;
189 }
190 offset &= 0x3f;
191 newoffset = offset;
192 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
193 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
194 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
195 rfpath, pphyreg->rf3wire_offset,
196 data_and_addr));
197}
198
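/*
 * Return the position of the least-significant set bit in bitmask
 * (32 when no bit is set); used to align register field values.
 */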
199static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
200{
201 u32 i;
202
203 for (i = 0; i <= 31; i++) {
204 if (((bitmask >> i) & 0x1) == 1)
205 break;
206 }
207 return i;
208}
209
210static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
211{
212 rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
213 rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
214 rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
215 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
216 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
217 rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
218 rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
219 rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
220 rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
221 rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
222}
223bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
224{
225 return rtl92c_phy_rf6052_config(hw);
226}
227
228static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
229{
230 struct rtl_priv *rtlpriv = rtl_priv(hw);
231 struct rtl_phy *rtlphy = &(rtlpriv->phy);
232 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
233 bool rtstatus;
234
235 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
236 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
237 BASEBAND_CONFIG_PHY_REG);
238 if (rtstatus != true) {
239 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
240 return false;
241 }
242 if (rtlphy->rf_type == RF_1T2R) {
243 _rtl92c_phy_bb_config_1t(hw);
244 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
245 }
246 if (rtlefuse->autoload_failflag == false) {
247 rtlphy->pwrgroup_cnt = 0;
248 rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw,
249 BASEBAND_CONFIG_PHY_REG);
250 }
251 if (rtstatus != true) {
252 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
253 return false;
254 }
255 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
256 BASEBAND_CONFIG_AGC_TAB);
257 if (rtstatus != true) {
258 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
259 return false;
260 }
261 rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
262 RFPGA0_XA_HSSIPARAMETER2,
263 0x200));
264 return true;
265}
266
267
268void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw)
269{
270}
271
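/*
 * Cache the per-rate TX power offsets delivered by the PG header-file
 * parser into mcs_txpwrlevel_origoffset[pwrgroup][0..15].  The group
 * counter advances once the last entry of a group
 * (RTXAGC_B_MCS15_MCS12) has been stored.
 */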
272static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
273 u32 regaddr, u32 bitmask,
274 u32 data)
275{
276 struct rtl_priv *rtlpriv = rtl_priv(hw);
277 struct rtl_phy *rtlphy = &(rtlpriv->phy);
278
279 if (regaddr == RTXAGC_A_RATE18_06) {
280 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data;
281 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
282 ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
283 rtlphy->pwrgroup_cnt,
284 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0]));
285 }
286 if (regaddr == RTXAGC_A_RATE54_24) {
287 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data;
288 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
289 ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
290 rtlphy->pwrgroup_cnt,
291 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1]));
292 }
293 if (regaddr == RTXAGC_A_CCK1_MCS32) {
294 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data;
295 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
296 ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
297 rtlphy->pwrgroup_cnt,
298 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6]));
299 }
300 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
301 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data;
302 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
303 ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
304 rtlphy->pwrgroup_cnt,
305 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7]));
306 }
307 if (regaddr == RTXAGC_A_MCS03_MCS00) {
308 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data;
309 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
310 ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
311 rtlphy->pwrgroup_cnt,
312 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2]));
313 }
314 if (regaddr == RTXAGC_A_MCS07_MCS04) {
315 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data;
316 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
317 ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
318 rtlphy->pwrgroup_cnt,
319 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3]));
320 }
321 if (regaddr == RTXAGC_A_MCS11_MCS08) {
322 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data;
323 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
324 ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
325 rtlphy->pwrgroup_cnt,
326 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4]));
327 }
328 if (regaddr == RTXAGC_A_MCS15_MCS12) {
329 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data;
330 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
331 ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
332 rtlphy->pwrgroup_cnt,
333 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5]));
334 }
335 if (regaddr == RTXAGC_B_RATE18_06) {
336 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data;
337 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
338 ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
339 rtlphy->pwrgroup_cnt,
340 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8]));
341 }
342 if (regaddr == RTXAGC_B_RATE54_24) {
343 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data;
344
345 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
346 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
347 rtlphy->pwrgroup_cnt,
348 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9]));
349 }
350
351 if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
352 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data;
353
354 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
355 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
356 rtlphy->pwrgroup_cnt,
357 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14]));
358 }
359
360 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
361 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data;
362
363 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
364 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
365 rtlphy->pwrgroup_cnt,
366 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15]));
367 }
368
369 if (regaddr == RTXAGC_B_MCS03_MCS00) {
370 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data;
371
372 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
373 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
374 rtlphy->pwrgroup_cnt,
375 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10]));
376 }
377
378 if (regaddr == RTXAGC_B_MCS07_MCS04) {
379 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data;
380
381 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
382 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
383 rtlphy->pwrgroup_cnt,
384 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11]));
385 }
386
387 if (regaddr == RTXAGC_B_MCS11_MCS08) {
388 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data;
389
390 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
391 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
392 rtlphy->pwrgroup_cnt,
393 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12]));
394 }
395
396 if (regaddr == RTXAGC_B_MCS15_MCS12) {
397 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data;
398
399 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
400 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
401 rtlphy->pwrgroup_cnt,
402 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13]));
403
404 rtlphy->pwrgroup_cnt++;
405 }
406}
407
408static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
409 enum radio_path rfpath)
410{
411 return true;
412}
413
414void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
415{
416 struct rtl_priv *rtlpriv = rtl_priv(hw);
417 struct rtl_phy *rtlphy = &(rtlpriv->phy);
418
419 rtlphy->default_initialgain[0] =
420 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
421 rtlphy->default_initialgain[1] =
422 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
423 rtlphy->default_initialgain[2] =
424 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
425 rtlphy->default_initialgain[3] =
426 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
427
428 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
429 ("Default initial gain (c50=0x%x, "
430		  "c58=0x%x, c60=0x%x, c68=0x%x)\n",
431 rtlphy->default_initialgain[0],
432 rtlphy->default_initialgain[1],
433 rtlphy->default_initialgain[2],
434 rtlphy->default_initialgain[3]));
435
436 rtlphy->framesync = (u8) rtl_get_bbreg(hw,
437 ROFDM0_RXDETECTOR3, MASKBYTE0);
438 rtlphy->framesync_c34 = rtl_get_bbreg(hw,
439 ROFDM0_RXDETECTOR2, MASKDWORD);
440
441 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
442 ("Default framesync (0x%x) = 0x%x\n",
443 ROFDM0_RXDETECTOR3, rtlphy->framesync));
444}
445
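/*
 * Fill in the per-path BB register map (interface switch, HSSI
 * parameters, LSSI readback, AGC and IQ-imbalance registers) that the
 * RF serial read/write helpers index by radio path.
 */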
446static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
447{
448 struct rtl_priv *rtlpriv = rtl_priv(hw);
449 struct rtl_phy *rtlphy = &(rtlpriv->phy);
450
451 rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
452 rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
453 rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
454 rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
455
456 rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
457 rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
458 rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
459 rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
460
461 rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
462 rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
463
464 rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
465 rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
466
467 rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
468 RFPGA0_XA_LSSIPARAMETER;
469 rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
470 RFPGA0_XB_LSSIPARAMETER;
471
472 rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
473 rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
474 rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
475 rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
476
477 rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
478 rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
479 rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
480 rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
481
482 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
483 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
484
485 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
486 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
487
488 rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
489 RFPGA0_XAB_SWITCHCONTROL;
490 rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
491 RFPGA0_XAB_SWITCHCONTROL;
492 rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
493 RFPGA0_XCD_SWITCHCONTROL;
494 rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
495 RFPGA0_XCD_SWITCHCONTROL;
496
497 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
498 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
499 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
500 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
501
502 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
503 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
504 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
505 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
506
507 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
508 ROFDM0_XARXIQIMBALANCE;
509 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
510 ROFDM0_XBRXIQIMBALANCE;
511 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
512 ROFDM0_XCRXIQIMBANLANCE;
513 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
514 ROFDM0_XDRXIQIMBALANCE;
515
516 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
517 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
518 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
519 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
520
521 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
522 ROFDM0_XATXIQIMBALANCE;
523 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
524 ROFDM0_XBTXIQIMBALANCE;
525 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
526 ROFDM0_XCTXIQIMBALANCE;
527 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
528 ROFDM0_XDTXIQIMBALANCE;
529
530 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
531 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
532 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
533 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
534
535 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
536 RFPGA0_XA_LSSIREADBACK;
537 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
538 RFPGA0_XB_LSSIREADBACK;
539 rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
540 RFPGA0_XC_LSSIREADBACK;
541 rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
542 RFPGA0_XD_LSSIREADBACK;
543
544 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
545 TRANSCEIVEA_HSPI_READBACK;
546 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
547 TRANSCEIVEB_HSPI_READBACK;
548
549}
550
551void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
552{
553 struct rtl_priv *rtlpriv = rtl_priv(hw);
554 struct rtl_phy *rtlphy = &(rtlpriv->phy);
555 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
556 u8 txpwr_level;
557 long txpwr_dbm;
558
559 txpwr_level = rtlphy->cur_cck_txpwridx;
560 txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
561 WIRELESS_MODE_B, txpwr_level);
562 txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
563 rtlefuse->legacy_ht_txpowerdiff;
564 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
565 WIRELESS_MODE_G,
566 txpwr_level) > txpwr_dbm)
567 txpwr_dbm =
568 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
569 txpwr_level);
570 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
571 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
572 WIRELESS_MODE_N_24G,
573 txpwr_level) > txpwr_dbm)
574 txpwr_dbm =
575 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
576 txpwr_level);
577 *powerlevel = txpwr_dbm;
578}
579
580static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
581 u8 *cckpowerlevel, u8 *ofdmpowerlevel)
582{
583 struct rtl_priv *rtlpriv = rtl_priv(hw);
584 struct rtl_phy *rtlphy = &(rtlpriv->phy);
585 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
586 u8 index = (channel - 1);
587
588 cckpowerlevel[RF90_PATH_A] =
589 rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
590 cckpowerlevel[RF90_PATH_B] =
591 rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
592 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
593 ofdmpowerlevel[RF90_PATH_A] =
594 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
595 ofdmpowerlevel[RF90_PATH_B] =
596 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
597 } else if (get_rf_type(rtlphy) == RF_2T2R) {
598 ofdmpowerlevel[RF90_PATH_A] =
599 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
600 ofdmpowerlevel[RF90_PATH_B] =
601 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
602 }
603}
604
605static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
606 u8 channel, u8 *cckpowerlevel,
607 u8 *ofdmpowerlevel)
608{
609 struct rtl_priv *rtlpriv = rtl_priv(hw);
610 struct rtl_phy *rtlphy = &(rtlpriv->phy);
611
612 rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
613 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
614}
615
616void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
617{
618 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
619 u8 cckpowerlevel[2], ofdmpowerlevel[2];
620
621 if (rtlefuse->txpwr_fromeprom == false)
622 return;
623 _rtl92c_get_txpower_index(hw, channel,
624 &cckpowerlevel[0], &ofdmpowerlevel[0]);
625 _rtl92c_ccxpower_index_check(hw,
626 channel, &cckpowerlevel[0],
627 &ofdmpowerlevel[0]);
628 rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
629 rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
630}
631
632bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
633{
634 struct rtl_priv *rtlpriv = rtl_priv(hw);
635 struct rtl_phy *rtlphy = &(rtlpriv->phy);
636 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
637 u8 idx;
638 u8 rf_path;
639
640 u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
641 WIRELESS_MODE_B,
642 power_indbm);
643 u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
644 WIRELESS_MODE_N_24G,
645 power_indbm);
646 if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
647 ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
648 else
649 ofdmtxpwridx = 0;
650 RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
651 ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
652 power_indbm, ccktxpwridx, ofdmtxpwridx));
653 for (idx = 0; idx < 14; idx++) {
654 for (rf_path = 0; rf_path < 2; rf_path++) {
655 rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
656 rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
657 ofdmtxpwridx;
658 rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
659 ofdmtxpwridx;
660 }
661 }
662 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
663 return true;
664}
665
666void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
667{
668}
669
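/*
 * Map a requested power in dBm to a hardware TX power index:
 * index = (dBm - offset) * 2, with offset -7 for CCK (802.11b) and
 * -8 for OFDM/HT, clamped to MAX_TXPWR_IDX_NMODE_92S.
 * Example: 16 dBm in N mode gives (16 + 8) * 2 = 48.
 */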
670static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
671 enum wireless_mode wirelessmode,
672 long power_indbm)
673{
674 u8 txpwridx;
675 long offset;
676
677 switch (wirelessmode) {
678 case WIRELESS_MODE_B:
679 offset = -7;
680 break;
681 case WIRELESS_MODE_G:
682 case WIRELESS_MODE_N_24G:
683 offset = -8;
684 break;
685 default:
686 offset = -8;
687 break;
688 }
689
690 if ((power_indbm - offset) > 0)
691 txpwridx = (u8) ((power_indbm - offset) * 2);
692 else
693 txpwridx = 0;
694
695 if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
696 txpwridx = MAX_TXPWR_IDX_NMODE_92S;
697
698 return txpwridx;
699}
700
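/* Inverse of the mapping above: dBm = index / 2 + offset. */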
701static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
702 enum wireless_mode wirelessmode,
703 u8 txpwridx)
704{
705 long offset;
706 long pwrout_dbm;
707
708 switch (wirelessmode) {
709 case WIRELESS_MODE_B:
710 offset = -7;
711 break;
712 case WIRELESS_MODE_G:
713 case WIRELESS_MODE_N_24G:
714 offset = -8;
715 break;
716 default:
717 offset = -8;
718 break;
719 }
720 pwrout_dbm = txpwridx / 2 + offset;
721 return pwrout_dbm;
722}
723
724void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
725{
726 struct rtl_priv *rtlpriv = rtl_priv(hw);
727 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
728 enum io_type iotype;
729
730 if (!is_hal_stop(rtlhal)) {
731 switch (operation) {
732 case SCAN_OPT_BACKUP:
733 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
734 rtlpriv->cfg->ops->set_hw_reg(hw,
735 HW_VAR_IO_CMD,
736 (u8 *)&iotype);
737
738 break;
739 case SCAN_OPT_RESTORE:
740 iotype = IO_CMD_RESUME_DM_BY_SCAN;
741 rtlpriv->cfg->ops->set_hw_reg(hw,
742 HW_VAR_IO_CMD,
743 (u8 *)&iotype);
744 break;
745 default:
746 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
747 ("Unknown Scan Backup operation.\n"));
748 break;
749 }
750 }
751}
752
753void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
754 enum nl80211_channel_type ch_type)
755{
756 struct rtl_priv *rtlpriv = rtl_priv(hw);
757 struct rtl_phy *rtlphy = &(rtlpriv->phy);
758 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
759 u8 tmp_bw = rtlphy->current_chan_bw;
760
761 if (rtlphy->set_bwmode_inprogress)
762 return;
763 rtlphy->set_bwmode_inprogress = true;
764 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
765 rtl92c_phy_set_bw_mode_callback(hw);
766 else {
767 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
768 ("FALSE driver sleep or unload\n"));
769 rtlphy->set_bwmode_inprogress = false;
770 rtlphy->current_chan_bw = tmp_bw;
771 }
772}
773
774void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
775{
776 struct rtl_priv *rtlpriv = rtl_priv(hw);
777 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
778 struct rtl_phy *rtlphy = &(rtlpriv->phy);
779 u32 delay;
780
781 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
782 ("switch to channel%d\n", rtlphy->current_channel));
783 if (is_hal_stop(rtlhal))
784 return;
785 do {
786 if (!rtlphy->sw_chnl_inprogress)
787 break;
788 if (!_rtl92c_phy_sw_chnl_step_by_step
789 (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
790 &rtlphy->sw_chnl_step, &delay)) {
791 if (delay > 0)
792 mdelay(delay);
793 else
794 continue;
795 } else
796 rtlphy->sw_chnl_inprogress = false;
797 break;
798 } while (true);
799 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
800}
801
802u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
803{
804 struct rtl_priv *rtlpriv = rtl_priv(hw);
805 struct rtl_phy *rtlphy = &(rtlpriv->phy);
806 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
807
808 if (rtlphy->sw_chnl_inprogress)
809 return 0;
810 if (rtlphy->set_bwmode_inprogress)
811 return 0;
812 RT_ASSERT((rtlphy->current_channel <= 14),
813 ("WIRELESS_MODE_G but channel>14"));
814 rtlphy->sw_chnl_inprogress = true;
815 rtlphy->sw_chnl_stage = 0;
816 rtlphy->sw_chnl_step = 0;
817 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
818 rtl92c_phy_sw_chnl_callback(hw);
819 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
820			 ("sw_chnl_inprogress false, schedule workitem\n"));
821 rtlphy->sw_chnl_inprogress = false;
822 } else {
823 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
824 ("sw_chnl_inprogress false driver sleep or"
825 " unload\n"));
826 rtlphy->sw_chnl_inprogress = false;
827 }
828 return 1;
829}
830
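/*
 * Execute one step of the staged channel switch: stage 0 re-programs
 * TX power for the channel, stage 1 writes the channel number into
 * RF_CHNLBW on every RF path, stage 2 only terminates the sequence.
 * Returns true when the whole sequence has finished; otherwise *delay
 * holds the number of ms to wait before the next call.
 */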
831static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
832 u8 channel, u8 *stage, u8 *step,
833 u32 *delay)
834{
835 struct rtl_priv *rtlpriv = rtl_priv(hw);
836 struct rtl_phy *rtlphy = &(rtlpriv->phy);
837 struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
838 u32 precommoncmdcnt;
839 struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
840 u32 postcommoncmdcnt;
841 struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
842 u32 rfdependcmdcnt;
843 struct swchnlcmd *currentcmd = NULL;
844 u8 rfpath;
845 u8 num_total_rfpath = rtlphy->num_total_rfpath;
846
847 precommoncmdcnt = 0;
848 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
849 MAX_PRECMD_CNT,
850 CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
851 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
852 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
853
854 postcommoncmdcnt = 0;
855
856 _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
857 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
858
859 rfdependcmdcnt = 0;
860
861 RT_ASSERT((channel >= 1 && channel <= 14),
862 ("illegal channel for Zebra: %d\n", channel));
863
864 _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
865 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
866 RF_CHNLBW, channel, 10);
867
868 _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
869 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
870 0);
871
872 do {
873 switch (*stage) {
874 case 0:
875 currentcmd = &precommoncmd[*step];
876 break;
877 case 1:
878 currentcmd = &rfdependcmd[*step];
879 break;
880 case 2:
881 currentcmd = &postcommoncmd[*step];
882 break;
883 }
884
885 if (currentcmd->cmdid == CMDID_END) {
886 if ((*stage) == 2) {
887 return true;
888 } else {
889 (*stage)++;
890 (*step) = 0;
891 continue;
892 }
893 }
894
895 switch (currentcmd->cmdid) {
896 case CMDID_SET_TXPOWEROWER_LEVEL:
897 rtl92c_phy_set_txpower_level(hw, channel);
898 break;
899 case CMDID_WRITEPORT_ULONG:
900 rtl_write_dword(rtlpriv, currentcmd->para1,
901 currentcmd->para2);
902 break;
903 case CMDID_WRITEPORT_USHORT:
904 rtl_write_word(rtlpriv, currentcmd->para1,
905 (u16) currentcmd->para2);
906 break;
907 case CMDID_WRITEPORT_UCHAR:
908 rtl_write_byte(rtlpriv, currentcmd->para1,
909 (u8) currentcmd->para2);
910 break;
911 case CMDID_RF_WRITEREG:
912 for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
913 rtlphy->rfreg_chnlval[rfpath] =
914 ((rtlphy->rfreg_chnlval[rfpath] &
915 0xfffffc00) | currentcmd->para2);
916
917 rtl_set_rfreg(hw, (enum radio_path)rfpath,
918 currentcmd->para1,
919 RFREG_OFFSET_MASK,
920 rtlphy->rfreg_chnlval[rfpath]);
921 }
922 break;
923 default:
924 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
925 ("switch case not process\n"));
926 break;
927 }
928
929 break;
930 } while (true);
931
932 (*delay) = currentcmd->msdelay;
933 (*step)++;
934 return false;
935}
936
937static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
938 u32 cmdtableidx, u32 cmdtablesz,
939 enum swchnlcmd_id cmdid,
940 u32 para1, u32 para2, u32 msdelay)
941{
942 struct swchnlcmd *pcmd;
943
944 if (cmdtable == NULL) {
945 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
946 return false;
947 }
948
949 if (cmdtableidx >= cmdtablesz)
950 return false;
951
952 pcmd = cmdtable + cmdtableidx;
953 pcmd->cmdid = cmdid;
954 pcmd->para1 = para1;
955 pcmd->para2 = para2;
956 pcmd->msdelay = msdelay;
957 return true;
958}
959
960bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
961{
962 return true;
963}
964
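/*
 * Run path A IQ calibration once.  Bit 0 of the return value is set
 * when the TX IQK passed (0xeac BIT(28) clear and the 0xe94/0xe9c
 * readbacks away from the 0x142/0x42 failure signatures), bit 1 when
 * the RX IQK passed.
 */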
965static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
966{
967 u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
968 u8 result = 0x00;
969
970 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
971 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
972 rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
973 rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
974 config_pathb ? 0x28160202 : 0x28160502);
975
976 if (config_pathb) {
977 rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
978 rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
979 rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
980 rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
981 }
982
983 rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
984 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
985 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
986
987 mdelay(IQK_DELAY_TIME);
988
989 reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
990 reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
991 reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
992 reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
993
994 if (!(reg_eac & BIT(28)) &&
995 (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
996 (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
997 result |= 0x01;
998 else
999 return result;
1000
1001 if (!(reg_eac & BIT(27)) &&
1002 (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
1003 (((reg_eac & 0x03FF0000) >> 16) != 0x36))
1004 result |= 0x02;
1005 return result;
1006}
1007
1008static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
1009{
1010 u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
1011 u8 result = 0x00;
1012
1013 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
1014 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
1015 mdelay(IQK_DELAY_TIME);
1016 reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1017 reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
1018 reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
1019 reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
1020 reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
1021 if (!(reg_eac & BIT(31)) &&
1022 (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
1023 (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
1024 result |= 0x01;
1025 else
1026 return result;
1027
1028 if (!(reg_eac & BIT(30)) &&
1029 (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
1030 (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
1031 result |= 0x02;
1032 return result;
1033}
1034
1035static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
1036 bool iqk_ok, long result[][8],
1037 u8 final_candidate, bool btxonly)
1038{
1039 u32 oldval_0, x, tx0_a, reg;
1040 long y, tx0_c;
1041
1042 if (final_candidate == 0xFF)
1043 return;
1044 else if (iqk_ok) {
1045 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1046 MASKDWORD) >> 22) & 0x3FF;
1047 x = result[final_candidate][0];
1048 if ((x & 0x00000200) != 0)
1049 x = x | 0xFFFFFC00;
1050 tx0_a = (x * oldval_0) >> 8;
1051 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
1052 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
1053 ((x * oldval_0 >> 7) & 0x1));
1054 y = result[final_candidate][1];
1055 if ((y & 0x00000200) != 0)
1056 y = y | 0xFFFFFC00;
1057 tx0_c = (y * oldval_0) >> 8;
1058 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
1059 ((tx0_c & 0x3C0) >> 6));
1060 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
1061 (tx0_c & 0x3F));
1062 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
1063 ((y * oldval_0 >> 7) & 0x1));
1064 if (btxonly)
1065 return;
1066 reg = result[final_candidate][2];
1067 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
1068 reg = result[final_candidate][3] & 0x3F;
1069 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
1070 reg = (result[final_candidate][3] >> 6) & 0xF;
1071 rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
1072 }
1073}
1074
1075static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
1076 bool iqk_ok, long result[][8],
1077 u8 final_candidate, bool btxonly)
1078{
1079 u32 oldval_1, x, tx1_a, reg;
1080 long y, tx1_c;
1081
1082 if (final_candidate == 0xFF)
1083 return;
1084 else if (iqk_ok) {
1085 oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
1086 MASKDWORD) >> 22) & 0x3FF;
1087 x = result[final_candidate][4];
1088 if ((x & 0x00000200) != 0)
1089 x = x | 0xFFFFFC00;
1090 tx1_a = (x * oldval_1) >> 8;
1091 rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
1092 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
1093 ((x * oldval_1 >> 7) & 0x1));
1094 y = result[final_candidate][5];
1095 if ((y & 0x00000200) != 0)
1096 y = y | 0xFFFFFC00;
1097 tx1_c = (y * oldval_1) >> 8;
1098 rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
1099 ((tx1_c & 0x3C0) >> 6));
1100 rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
1101 (tx1_c & 0x3F));
1102 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
1103 ((y * oldval_1 >> 7) & 0x1));
1104 if (btxonly)
1105 return;
1106 reg = result[final_candidate][6];
1107 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
1108 reg = result[final_candidate][7] & 0x3F;
1109 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
1110 reg = (result[final_candidate][7] >> 6) & 0xF;
1111 rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
1112 }
1113}
1114
1115static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
1116 u32 *addareg, u32 *addabackup,
1117 u32 registernum)
1118{
1119 u32 i;
1120
1121 for (i = 0; i < registernum; i++)
1122 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1123}
1124
1125static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
1126 u32 *macreg, u32 *macbackup)
1127{
1128 struct rtl_priv *rtlpriv = rtl_priv(hw);
1129 u32 i;
1130
1131 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1132 macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
1133 macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
1134}
1135
1136static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
1137 u32 *addareg, u32 *addabackup,
1138					      u32 registernum)
1139{
1140 u32 i;
1141
1142	for (i = 0; i < registernum; i++)
1143 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
1144}
1145
1146static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
1147 u32 *macreg, u32 *macbackup)
1148{
1149 struct rtl_priv *rtlpriv = rtl_priv(hw);
1150 u32 i;
1151
1152 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1153 rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
1154 rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
1155}
1156
1157static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
1158 u32 *addareg, bool is_patha_on, bool is2t)
1159{
1160 u32 pathOn;
1161 u32 i;
1162
1163 pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
1164 if (false == is2t) {
1165 pathOn = 0x0bdb25a0;
1166 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1167 } else {
1168 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
1169 }
1170
1171 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1172 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
1173}
1174
1175static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
1176 u32 *macreg, u32 *macbackup)
1177{
1178 struct rtl_priv *rtlpriv = rtl_priv(hw);
1179 u32 i;
1180
1181 rtl_write_byte(rtlpriv, macreg[0], 0x3F);
1182
1183 for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
1184 rtl_write_byte(rtlpriv, macreg[i],
1185 (u8) (macbackup[i] & (~BIT(3))));
1186 rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
1187}
1188
1189static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
1190{
1191 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
1192 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1193 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1194}
1195
1196static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1197{
1198 u32 mode;
1199
1200 mode = pi_mode ? 0x01000100 : 0x01000000;
1201 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1202 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1203}
1204
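/*
 * Compare two IQK result sets c1 and c2.  Entries differing by more
 * than MAX_TOLERANCE set a bit in the mismatch bitmap; an empty bitmap
 * means the runs agree, and when only one RF path mismatches the
 * values of the path that still agrees are copied from c1 into
 * result[3] and false is returned.
 */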
1205static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
1206 long result[][8], u8 c1, u8 c2)
1207{
1208 u32 i, j, diff, simularity_bitmap, bound;
1209 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1210
1211 u8 final_candidate[2] = { 0xFF, 0xFF };
1212 bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
1213
1214 if (is2t)
1215 bound = 8;
1216 else
1217 bound = 4;
1218
1219 simularity_bitmap = 0;
1220
1221 for (i = 0; i < bound; i++) {
1222 diff = (result[c1][i] > result[c2][i]) ?
1223 (result[c1][i] - result[c2][i]) :
1224 (result[c2][i] - result[c1][i]);
1225
1226 if (diff > MAX_TOLERANCE) {
1227 if ((i == 2 || i == 6) && !simularity_bitmap) {
1228 if (result[c1][i] + result[c1][i + 1] == 0)
1229 final_candidate[(i / 4)] = c2;
1230 else if (result[c2][i] + result[c2][i + 1] == 0)
1231 final_candidate[(i / 4)] = c1;
1232 else
1233 simularity_bitmap = simularity_bitmap |
1234 (1 << i);
1235 } else
1236 simularity_bitmap =
1237 simularity_bitmap | (1 << i);
1238 }
1239 }
1240
1241 if (simularity_bitmap == 0) {
1242 for (i = 0; i < (bound / 4); i++) {
1243 if (final_candidate[i] != 0xFF) {
1244 for (j = i * 4; j < (i + 1) * 4 - 2; j++)
1245 result[3][j] =
1246 result[final_candidate[i]][j];
1247 bresult = false;
1248 }
1249 }
1250 return bresult;
1251 } else if (!(simularity_bitmap & 0x0F)) {
1252 for (i = 0; i < 4; i++)
1253 result[3][i] = result[c1][i];
1254 return false;
1255 } else if (!(simularity_bitmap & 0xF0) && is2t) {
1256 for (i = 4; i < 8; i++)
1257 result[3][i] = result[c1][i];
1258 return false;
1259 } else {
1260 return false;
1261 }
1262
1263}
1264
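/*
 * One IQ-calibration pass.  On the first pass (t == 0) the ADDA, MAC
 * and a few BB registers are saved; the ADDA path and PI mode are then
 * switched on, path A (and path B on 2T2R parts) IQK is retried up to
 * twice and the 0xe94..0xeac / 0xeb4..0xecc readbacks are stored in
 * result[t].  The saved registers are only reloaded on a later pass
 * (t != 0).
 */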
1265static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1266 long result[][8], u8 t, bool is2t)
1267{
1268 struct rtl_priv *rtlpriv = rtl_priv(hw);
1269 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1270 u32 i;
1271 u8 patha_ok, pathb_ok;
1272 u32 adda_reg[IQK_ADDA_REG_NUM] = {
1273 0x85c, 0xe6c, 0xe70, 0xe74,
1274 0xe78, 0xe7c, 0xe80, 0xe84,
1275 0xe88, 0xe8c, 0xed0, 0xed4,
1276 0xed8, 0xedc, 0xee0, 0xeec
1277 };
1278
1279 u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
1280 0x522, 0x550, 0x551, 0x040
1281 };
1282
1283 const u32 retrycount = 2;
1284
1285 u32 bbvalue;
1286
1287 if (t == 0) {
1288 bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
1289
1290 _rtl92c_phy_save_adda_registers(hw, adda_reg,
1291 rtlphy->adda_backup, 16);
1292 _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
1293 rtlphy->iqk_mac_backup);
1294 }
1295 _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
1296 if (t == 0) {
1297 rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
1298 RFPGA0_XA_HSSIPARAMETER1,
1299 BIT(8));
1300 }
1301 if (!rtlphy->rfpi_enable)
1302 _rtl92c_phy_pi_mode_switch(hw, true);
1303 if (t == 0) {
1304 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
1305 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
1306 rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
1307 }
1308 rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
1309 rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
1310 rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
1311 if (is2t) {
1312 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1313 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1314 }
1315 _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
1316 rtlphy->iqk_mac_backup);
1317 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
1318 if (is2t)
1319 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
1320 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1321 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1322 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
1323 for (i = 0; i < retrycount; i++) {
1324 patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
1325 if (patha_ok == 0x03) {
1326 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1327 0x3FF0000) >> 16;
1328 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1329 0x3FF0000) >> 16;
1330 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1331 0x3FF0000) >> 16;
1332 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1333 0x3FF0000) >> 16;
1334 break;
1335		} else if (i == (retrycount - 1) && patha_ok == 0x01) {
1336			result[t][0] = (rtl_get_bbreg(hw, 0xe94,
1337						      MASKDWORD) & 0x3FF0000) >> 16;
1338			result[t][1] =
1339			    (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
1340		}
1341
1342 }
1343
1344 if (is2t) {
1345 _rtl92c_phy_path_a_standby(hw);
1346 _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
1347 for (i = 0; i < retrycount; i++) {
1348 pathb_ok = _rtl92c_phy_path_b_iqk(hw);
1349 if (pathb_ok == 0x03) {
1350 result[t][4] = (rtl_get_bbreg(hw,
1351 0xeb4,
1352 MASKDWORD) &
1353 0x3FF0000) >> 16;
1354 result[t][5] =
1355 (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1356 0x3FF0000) >> 16;
1357 result[t][6] =
1358 (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
1359 0x3FF0000) >> 16;
1360 result[t][7] =
1361 (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
1362 0x3FF0000) >> 16;
1363 break;
1364			} else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1365				result[t][4] = (rtl_get_bbreg(hw,
1366							      0xeb4,
1367							      MASKDWORD) &
1368							      0x3FF0000) >> 16;
1369				result[t][5] = (rtl_get_bbreg(hw, 0xebc,
1370						MASKDWORD) & 0x3FF0000) >> 16;
1371			}
1372 }
1373 }
1374 rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
1375 rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
1376 rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
1377 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1378 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
1379 if (is2t)
1380 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1381 if (t != 0) {
1382 if (!rtlphy->rfpi_enable)
1383 _rtl92c_phy_pi_mode_switch(hw, false);
1384 _rtl92c_phy_reload_adda_registers(hw, adda_reg,
1385 rtlphy->adda_backup, 16);
1386 _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
1387 rtlphy->iqk_mac_backup);
1388 }
1389}
1390
1391static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1392 char delta, bool is2t)
1393{
1394 /* This routine is deliberately dummied out for later fixes */
1395#if 0
1396 struct rtl_priv *rtlpriv = rtl_priv(hw);
1397 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1398 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1399
1400 u32 reg_d[PATH_NUM];
1401 u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
1402
1403 u32 bb_backup[APK_BB_REG_NUM];
1404 u32 bb_reg[APK_BB_REG_NUM] = {
1405 0x904, 0xc04, 0x800, 0xc08, 0x874
1406 };
1407 u32 bb_ap_mode[APK_BB_REG_NUM] = {
1408 0x00000020, 0x00a05430, 0x02040000,
1409 0x000800e4, 0x00204000
1410 };
1411 u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
1412 0x00000020, 0x00a05430, 0x02040000,
1413 0x000800e4, 0x22204000
1414 };
1415
1416 u32 afe_backup[APK_AFE_REG_NUM];
1417 u32 afe_reg[APK_AFE_REG_NUM] = {
1418 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
1419 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
1420 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
1421 0xeec
1422 };
1423
1424 u32 mac_backup[IQK_MAC_REG_NUM];
1425 u32 mac_reg[IQK_MAC_REG_NUM] = {
1426 0x522, 0x550, 0x551, 0x040
1427 };
1428
1429 u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1430 {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
1431 {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
1432 };
1433
1434 u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1435 {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
1436 {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
1437 };
1438
1439 u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1440 {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
1441 {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
1442 };
1443
1444 u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1445 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
1446 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
1447 };
1448
1449 u32 afe_on_off[PATH_NUM] = {
1450 0x04db25a4, 0x0b1b25a4
1451 };
1452
1453 u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
1454
1455 u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
1456
1457 u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
1458
1459 u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
1460
1461 const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
1462 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1463 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1464 {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1465 {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1466 {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
1467 };
1468
1469 const u32 apk_normal_setting_value_1[13] = {
1470 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
1471 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
1472 0x12680000, 0x00880000, 0x00880000
1473 };
1474
1475 const u32 apk_normal_setting_value_2[16] = {
1476 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
1477 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
1478 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
1479 0x00050006
1480 };
1481
1482	u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
1483
1484 long bb_offset, delta_v, delta_offset;
1485
1486 if (!is2t)
1487 pathbound = 1;
1488
1489 for (index = 0; index < PATH_NUM; index++) {
1490 apk_offset[index] = apk_normal_offset[index];
1491 apk_value[index] = apk_normal_value[index];
1492 afe_on_off[index] = 0x6fdb25a4;
1493 }
1494
1495 for (index = 0; index < APK_BB_REG_NUM; index++) {
1496 for (path = 0; path < pathbound; path++) {
1497 apk_rf_init_value[path][index] =
1498 apk_normal_rf_init_value[path][index];
1499 apk_rf_value_0[path][index] =
1500 apk_normal_rf_value_0[path][index];
1501 }
1502 bb_ap_mode[index] = bb_normal_ap_mode[index];
1503
1504 apkbound = 6;
1505 }
1506
1507 for (index = 0; index < APK_BB_REG_NUM; index++) {
1508 if (index == 0)
1509 continue;
1510 bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
1511 }
1512
1513 _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
1514
1515 _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
1516
1517 for (path = 0; path < pathbound; path++) {
1518 if (path == RF90_PATH_A) {
1519 offset = 0xb00;
1520 for (index = 0; index < 11; index++) {
1521 rtl_set_bbreg(hw, offset, MASKDWORD,
1522 apk_normal_setting_value_1
1523 [index]);
1524
1525 offset += 0x04;
1526 }
1527
1528 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
1529
1530 offset = 0xb68;
1531 for (; index < 13; index++) {
1532 rtl_set_bbreg(hw, offset, MASKDWORD,
1533 apk_normal_setting_value_1
1534 [index]);
1535
1536 offset += 0x04;
1537 }
1538
1539 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
1540
1541 offset = 0xb00;
1542 for (index = 0; index < 16; index++) {
1543 rtl_set_bbreg(hw, offset, MASKDWORD,
1544 apk_normal_setting_value_2
1545 [index]);
1546
1547 offset += 0x04;
1548 }
1549 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
1550 } else if (path == RF90_PATH_B) {
1551 offset = 0xb70;
1552 for (index = 0; index < 10; index++) {
1553 rtl_set_bbreg(hw, offset, MASKDWORD,
1554 apk_normal_setting_value_1
1555 [index]);
1556
1557 offset += 0x04;
1558 }
1559 rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
1560 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
1561
1562 offset = 0xb68;
1563 index = 11;
1564 for (; index < 13; index++) {
1565 rtl_set_bbreg(hw, offset, MASKDWORD,
1566 apk_normal_setting_value_1
1567 [index]);
1568
1569 offset += 0x04;
1570 }
1571
1572 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
1573
1574 offset = 0xb60;
1575 for (index = 0; index < 16; index++) {
1576 rtl_set_bbreg(hw, offset, MASKDWORD,
1577 apk_normal_setting_value_2
1578 [index]);
1579
1580 offset += 0x04;
1581 }
1582 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
1583 }
1584
1585 reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
1586 0xd, MASKDWORD);
1587
1588 for (index = 0; index < APK_AFE_REG_NUM; index++)
1589 rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
1590 afe_on_off[path]);
1591
1592 if (path == RF90_PATH_A) {
1593 for (index = 0; index < APK_BB_REG_NUM; index++) {
1594 if (index == 0)
1595 continue;
1596 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
1597 bb_ap_mode[index]);
1598 }
1599 }
1600
1601 _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
1602
1603 if (path == 0) {
1604 rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
1605 } else {
1606 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
1607 0x10000);
1608 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
1609 0x1000f);
1610 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
1611 0x20103);
1612 }
1613
1614 delta_offset = ((delta + 14) / 2);
1615 if (delta_offset < 0)
1616 delta_offset = 0;
1617 else if (delta_offset > 12)
1618 delta_offset = 12;
1619
1620 for (index = 0; index < APK_BB_REG_NUM; index++) {
1621 if (index != 1)
1622 continue;
1623
1624 tmpreg = apk_rf_init_value[path][index];
1625
1626 if (!rtlefuse->apk_thermalmeterignore) {
1627 bb_offset = (tmpreg & 0xF0000) >> 16;
1628
1629 if (!(tmpreg & BIT(15)))
1630 bb_offset = -bb_offset;
1631
1632 delta_v =
1633 apk_delta_mapping[index][delta_offset];
1634
1635 bb_offset += delta_v;
1636
1637 if (bb_offset < 0) {
1638 tmpreg = tmpreg & (~BIT(15));
1639 bb_offset = -bb_offset;
1640 } else {
1641 tmpreg = tmpreg | BIT(15);
1642 }
1643
1644 tmpreg =
1645 (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
1646 }
1647
1648 rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
1649 MASKDWORD, 0x8992e);
1650 rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
1651 MASKDWORD, apk_rf_value_0[path][index]);
1652 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
1653 MASKDWORD, tmpreg);
1654
1655 i = 0;
1656 do {
1657 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
1658 rtl_set_bbreg(hw, apk_offset[path],
1659 MASKDWORD, apk_value[0]);
1660 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1661 ("PHY_APCalibrate() offset 0x%x "
1662 "value 0x%x\n",
1663 apk_offset[path],
1664 rtl_get_bbreg(hw, apk_offset[path],
1665 MASKDWORD)));
1666
1667 mdelay(3);
1668
1669 rtl_set_bbreg(hw, apk_offset[path],
1670 MASKDWORD, apk_value[1]);
1671 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1672 ("PHY_APCalibrate() offset 0x%x "
1673 "value 0x%x\n",
1674 apk_offset[path],
1675 rtl_get_bbreg(hw, apk_offset[path],
1676 MASKDWORD)));
1677
1678 mdelay(20);
1679
1680 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
1681
1682 if (path == RF90_PATH_A)
1683 tmpreg = rtl_get_bbreg(hw, 0xbd8,
1684 0x03E00000);
1685 else
1686 tmpreg = rtl_get_bbreg(hw, 0xbd8,
1687 0xF8000000);
1688
1689 RTPRINT(rtlpriv, FINIT, INIT_IQK,
1690 ("PHY_APCalibrate() offset "
1691 "0xbd8[25:21] %x\n", tmpreg));
1692
1693 i++;
1694
1695 } while (tmpreg > apkbound && i < 4);
1696
1697 apk_result[path][index] = tmpreg;
1698 }
1699 }
1700
1701 _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
1702
1703 for (index = 0; index < APK_BB_REG_NUM; index++) {
1704 if (index == 0)
1705 continue;
1706 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
1707 }
1708
1709 _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
1710
1711 for (path = 0; path < pathbound; path++) {
1712 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
1713 MASKDWORD, reg_d[path]);
1714
1715 if (path == RF90_PATH_B) {
1716 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
1717 0x1000f);
1718 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
1719 0x20101);
1720 }
1721
1722 if (apk_result[path][1] > 6)
1723 apk_result[path][1] = 6;
1724 }
1725
1726 for (path = 0; path < pathbound; path++) {
1727 rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
1728 ((apk_result[path][1] << 15) |
1729 (apk_result[path][1] << 10) |
1730 (apk_result[path][1] << 5) |
1731 apk_result[path][1]));
1732
1733 if (path == RF90_PATH_A)
1734 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
1735 ((apk_result[path][1] << 15) |
1736 (apk_result[path][1] << 10) |
1737 (0x00 << 5) | 0x05));
1738 else
1739 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
1740 ((apk_result[path][1] << 15) |
1741 (apk_result[path][1] << 10) |
1742 (0x02 << 5) | 0x05));
1743
1744 rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
1745 ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
1746 0x08));
1747
1748 }
1749
1750 rtlphy->apk_done = true;
1751#endif
1752}
1753
1754static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
1755 bool bmain, bool is2t)
1756{
1757 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1758
1759 if (is_hal_stop(rtlhal)) {
1760 rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
1761 rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
1762 }
1763 if (is2t) {
1764 if (bmain)
1765 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
1766 BIT(5) | BIT(6), 0x1);
1767 else
1768 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
1769 BIT(5) | BIT(6), 0x2);
1770 } else {
1771 if (bmain)
1772 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
1773 else
1774 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
1775
1776 }
1777}
1778
1779#undef IQK_ADDA_REG_NUM
1780#undef IQK_DELAY_TIME
1781
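/*
 * IQ calibration entry point: unless called with recovery=true (which only
 * restores the previously saved BB registers), run up to three calibration
 * passes and compare the result pairs (0,1), (0,2) and (1,2).  The first
 * pair that matches selects the final candidate; if none matches, fall back
 * to pass 3 when it produced non-zero data, otherwise program the neutral
 * defaults (0x100/0x0).  The touched BB registers are saved at the end so a
 * later recovery call can restore them.
 */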
1782void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
1783{
1784 struct rtl_priv *rtlpriv = rtl_priv(hw);
1785 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1786 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1787
1788 long result[4][8];
1789 u8 i, final_candidate;
1790 bool patha_ok, pathb_ok;
1791 long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
1792 reg_ecc, reg_tmp = 0;
1793 bool is12simular, is13simular, is23simular;
1794 bool start_conttx = false, singletone = false;
1795 u32 iqk_bb_reg[10] = {
1796 ROFDM0_XARXIQIMBALANCE,
1797 ROFDM0_XBRXIQIMBALANCE,
1798 ROFDM0_ECCATHRESHOLD,
1799 ROFDM0_AGCRSSITABLE,
1800 ROFDM0_XATXIQIMBALANCE,
1801 ROFDM0_XBTXIQIMBALANCE,
1802 ROFDM0_XCTXIQIMBALANCE,
1803 ROFDM0_XCTXAFE,
1804 ROFDM0_XDTXAFE,
1805 ROFDM0_RXIQEXTANTA
1806 };
1807
1808 if (recovery) {
1809 _rtl92c_phy_reload_adda_registers(hw,
1810 iqk_bb_reg,
1811 rtlphy->iqk_bb_backup, 10);
1812 return;
1813 }
1814 if (start_conttx || singletone)
1815 return;
1816 for (i = 0; i < 8; i++) {
1817 result[0][i] = 0;
1818 result[1][i] = 0;
1819 result[2][i] = 0;
1820 result[3][i] = 0;
1821 }
1822 final_candidate = 0xff;
1823 patha_ok = false;
1824 pathb_ok = false;
1825 is12simular = false;
1826 is23simular = false;
1827 is13simular = false;
1828 for (i = 0; i < 3; i++) {
1829 if (IS_92C_SERIAL(rtlhal->version))
1830 _rtl92c_phy_iq_calibrate(hw, result, i, true);
1831 else
1832 _rtl92c_phy_iq_calibrate(hw, result, i, false);
1833 if (i == 1) {
1834 is12simular = _rtl92c_phy_simularity_compare(hw,
1835 result, 0,
1836 1);
1837 if (is12simular) {
1838 final_candidate = 0;
1839 break;
1840 }
1841 }
1842 if (i == 2) {
1843 is13simular = _rtl92c_phy_simularity_compare(hw,
1844 result, 0,
1845 2);
1846 if (is13simular) {
1847 final_candidate = 0;
1848 break;
1849 }
1850 is23simular = _rtl92c_phy_simularity_compare(hw,
1851 result, 1,
1852 2);
1853 if (is23simular)
1854 final_candidate = 1;
1855 else {
1856 for (i = 0; i < 8; i++)
1857 reg_tmp += result[3][i];
1858
1859 if (reg_tmp != 0)
1860 final_candidate = 3;
1861 else
1862 final_candidate = 0xFF;
1863 }
1864 }
1865 }
1866 for (i = 0; i < 4; i++) {
1867 reg_e94 = result[i][0];
1868 reg_e9c = result[i][1];
1869 reg_ea4 = result[i][2];
1870 reg_eac = result[i][3];
1871 reg_eb4 = result[i][4];
1872 reg_ebc = result[i][5];
1873 reg_ec4 = result[i][6];
1874 reg_ecc = result[i][7];
1875 }
1876 if (final_candidate != 0xff) {
1877 rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
1878 rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
1879 reg_ea4 = result[final_candidate][2];
1880 reg_eac = result[final_candidate][3];
1881 rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
1882 rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
1883 reg_ec4 = result[final_candidate][6];
1884 reg_ecc = result[final_candidate][7];
1885 patha_ok = pathb_ok = true;
1886 } else {
1887 rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
1888 rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
1889 }
1890 if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
1891 _rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
1892 final_candidate,
1893 (reg_ea4 == 0));
1894 if (IS_92C_SERIAL(rtlhal->version)) {
1895 if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
1896 _rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok,
1897 result,
1898 final_candidate,
1899 (reg_ec4 == 0));
1900 }
1901 _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
1902 rtlphy->iqk_bb_backup, 10);
1903}
1904
1905void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
1906{
1907 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1908 bool start_conttx = false, singletone = false;
1909
1910 if (start_conttx || singletone)
1911 return;
1912 if (IS_92C_SERIAL(rtlhal->version))
1913 _rtl92c_phy_lc_calibrate(hw, true);
1914 else
1915 _rtl92c_phy_lc_calibrate(hw, false);
1916}
1917
1918void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
1919{
1920 struct rtl_priv *rtlpriv = rtl_priv(hw);
1921 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1922 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1923
1924 if (rtlphy->apk_done)
1925 return;
1926 if (IS_92C_SERIAL(rtlhal->version))
1927 _rtl92c_phy_ap_calibrate(hw, delta, true);
1928 else
1929 _rtl92c_phy_ap_calibrate(hw, delta, false);
1930}
1931
1932void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
1933{
1934 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1935
1936 if (IS_92C_SERIAL(rtlhal->version))
1937 _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
1938 else
1939 _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
1940}
1941
1942bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
1943{
1944 struct rtl_priv *rtlpriv = rtl_priv(hw);
1945 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1946 bool postprocessing = false;
1947
1948 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1949 ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
1950 iotype, rtlphy->set_io_inprogress));
1951 do {
1952 switch (iotype) {
1953 case IO_CMD_RESUME_DM_BY_SCAN:
1954 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1955 ("[IO CMD] Resume DM after scan.\n"));
1956 postprocessing = true;
1957 break;
1958 case IO_CMD_PAUSE_DM_BY_SCAN:
1959 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1960 ("[IO CMD] Pause DM before scan.\n"));
1961 postprocessing = true;
1962 break;
1963 default:
1964 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1965 ("switch case not processed\n"));
1966 break;
1967 }
1968 } while (false);
1969 if (postprocessing && !rtlphy->set_io_inprogress) {
1970 rtlphy->set_io_inprogress = true;
1971 rtlphy->current_io_type = iotype;
1972 } else {
1973 return false;
1974 }
1975 rtl92c_phy_set_io(hw);
1976 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
1977 return true;
1978}
1979
1980void rtl92c_phy_set_io(struct ieee80211_hw *hw)
1981{
1982 struct rtl_priv *rtlpriv = rtl_priv(hw);
1983 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1984
1985 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1986 ("--->Cmd(%#x), set_io_inprogress(%d)\n",
1987 rtlphy->current_io_type, rtlphy->set_io_inprogress));
1988 switch (rtlphy->current_io_type) {
1989 case IO_CMD_RESUME_DM_BY_SCAN:
1990 dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
1991 rtl92c_dm_write_dig(hw);
1992 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
1993 break;
1994 case IO_CMD_PAUSE_DM_BY_SCAN:
1995 rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
1996 dm_digtable.cur_igvalue = 0x17;
1997 rtl92c_dm_write_dig(hw);
1998 break;
1999 default:
2000 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2001 ("switch case not processed\n"));
2002 break;
2003 }
2004 rtlphy->set_io_inprogress = false;
2005 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2006 ("<---(%#x)\n", rtlphy->current_io_type));
2007}
2008
2009void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
2010{
2011 struct rtl_priv *rtlpriv = rtl_priv(hw);
2012
2013 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
2014 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2015 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
2016 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2017 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2018 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2019}
2020
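/*
 * RF sleep helper: pause TX, power the RF chain down, then poll RF
 * register 0 (toggling APSD_CTRL in between) for up to five iterations.
 * On timeout the APSD_CTRL/TXPAUSE writes are undone and the failure is
 * logged; otherwise the final SYS_FUNC_EN/SPS0_CTRL writes leave the
 * chip in its low-power state.
 */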
2021static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
2022{
2023 u32 u4b_tmp;
2024 u8 delay = 5;
2025 struct rtl_priv *rtlpriv = rtl_priv(hw);
2026
2027 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
2028 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
2029 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
2030 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
2031 while (u4b_tmp != 0 && delay > 0) {
2032 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
2033 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
2034 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
2035 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
2036 delay--;
2037 }
2038 if (delay == 0) {
2039 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
2040 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2041 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2042 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2043 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
2044 ("Switch RF timeout!\n"));
2045 return;
2046 }
2047 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2048 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
2049}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
index 0f0be7c763b..5c5fdde637c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
@@ -10,3 +10,5 @@ rtl8192ce-objs := \
10 trx.o 10 trx.o
11 11
12obj-$(CONFIG_RTL8192CE) += rtl8192ce.o 12obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
13
14ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 83cd6489529..2f577c8828f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -121,11 +121,37 @@
121#define CHIP_92C 0x01 121#define CHIP_92C 0x01
122#define CHIP_88C 0x00 122#define CHIP_88C 0x00
123 123
124/* Add vendor information into chip version definition.
125 * Add UMC B-Cut and RTL8723 chip info definition.
126 *
127 * BIT 7 Reserved
128 * BIT 6 UMC BCut
129 * BIT 5 Manufacturer(TSMC/UMC)
130 * BIT 4 TEST/NORMAL
131 * BIT 3 8723 Version
132 * BIT 2 8723?
133 * BIT 1 1T2R?
134 * BIT 0 88C/92C
135*/
136
124enum version_8192c { 137enum version_8192c {
125 VERSION_A_CHIP_92C = 0x01, 138 VERSION_A_CHIP_92C = 0x01,
126 VERSION_A_CHIP_88C = 0x00, 139 VERSION_A_CHIP_88C = 0x00,
127 VERSION_B_CHIP_92C = 0x11, 140 VERSION_B_CHIP_92C = 0x11,
128 VERSION_B_CHIP_88C = 0x10, 141 VERSION_B_CHIP_88C = 0x10,
142 VERSION_TEST_CHIP_88C = 0x00,
143 VERSION_TEST_CHIP_92C = 0x01,
144 VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
145 VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
146 VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
147 VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
148 VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
149 VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
150 VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
151 VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
152 VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
153 VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
154 VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
129 VERSION_UNKNOWN = 0x88, 155 VERSION_UNKNOWN = 0x88,
130}; 156};
131 157
@@ -254,4 +280,122 @@ struct h2c_cmd_8192c {
254 u8 *p_cmdbuffer; 280 u8 *p_cmdbuffer;
255}; 281};
256 282
283static inline u8 _rtl92c_get_chnl_group(u8 chnl)
284{
285 u8 group = 0;
286
287 if (chnl < 3)
288 group = 0;
289 else if (chnl < 9)
290 group = 1;
291 else
292 group = 2;
293
294 return group;
295}
296
297/* NOTE: reference to rtl8192c_rates struct */
298static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT,
299 u8 desc_rate, bool first_ampdu)
300{
301 struct rtl_priv *rtlpriv = rtl_priv(hw);
302 int rate_idx = 0;
303
304 if (first_ampdu) {
305 if (false == isHT) {
306 switch (desc_rate) {
307 case DESC92C_RATE1M:
308 rate_idx = 0;
309 break;
310 case DESC92C_RATE2M:
311 rate_idx = 1;
312 break;
313 case DESC92C_RATE5_5M:
314 rate_idx = 2;
315 break;
316 case DESC92C_RATE11M:
317 rate_idx = 3;
318 break;
319 case DESC92C_RATE6M:
320 rate_idx = 4;
321 break;
322 case DESC92C_RATE9M:
323 rate_idx = 5;
324 break;
325 case DESC92C_RATE12M:
326 rate_idx = 6;
327 break;
328 case DESC92C_RATE18M:
329 rate_idx = 7;
330 break;
331 case DESC92C_RATE24M:
332 rate_idx = 8;
333 break;
334 case DESC92C_RATE36M:
335 rate_idx = 9;
336 break;
337 case DESC92C_RATE48M:
338 rate_idx = 10;
339 break;
340 case DESC92C_RATE54M:
341 rate_idx = 11;
342 break;
343 default:
344 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
345 ("Rate %d is not supported, set to "
346 "1M rate.\n", desc_rate));
347 rate_idx = 0;
348 break;
349 }
350 } else {
351 rate_idx = 11;
352 }
353 return rate_idx;
354 }
355 switch (desc_rate) {
356 case DESC92C_RATE1M:
357 rate_idx = 0;
358 break;
359 case DESC92C_RATE2M:
360 rate_idx = 1;
361 break;
362 case DESC92C_RATE5_5M:
363 rate_idx = 2;
364 break;
365 case DESC92C_RATE11M:
366 rate_idx = 3;
367 break;
368 case DESC92C_RATE6M:
369 rate_idx = 4;
370 break;
371 case DESC92C_RATE9M:
372 rate_idx = 5;
373 break;
374 case DESC92C_RATE12M:
375 rate_idx = 6;
376 break;
377 case DESC92C_RATE18M:
378 rate_idx = 7;
379 break;
380 case DESC92C_RATE24M:
381 rate_idx = 8;
382 break;
383 case DESC92C_RATE36M:
384 rate_idx = 9;
385 break;
386 case DESC92C_RATE48M:
387 rate_idx = 10;
388 break;
389 case DESC92C_RATE54M:
390 rate_idx = 11;
391 break;
392 /* TODO: How to map MCS rates? */
393 /* NOTE: refer to __ieee80211_rx */
394 default:
395 rate_idx = 11;
396 break;
397 }
398 return rate_idx;
399}
400
257#endif 401#endif
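The version-byte bit layout documented in the def.h comment above lends
itself to simple mask tests.  A minimal sketch follows (the helper name and
the raw mask are illustrative only, not part of this patch):

	/* Illustrative decode of the 8192C version byte; not driver code. */
	static inline bool chip_is_umc_b_cut_92c(u8 version)
	{
		/* BIT 0 = 92C, BIT 5 = UMC, BIT 6 = UMC B-cut */
		return (version & 0x61) == 0x61;
	}

For example, VERSION_NORMAL_UMC_CHIP_92C_B_CUT (0x71) and its 1T2R variant
(0x73) satisfy this test, while the TSMC and UMC A-cut entries do not.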
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 62e7c64e087..888df5e2d2f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -35,485 +35,16 @@
35#include "dm.h" 35#include "dm.h"
36#include "fw.h" 36#include "fw.h"
37 37
38struct dig_t dm_digtable; 38#include "../rtl8192c/dm_common.c"
39static struct ps_t dm_pstable;
40 39
41static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 40void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
42 0x7f8001fe,
43 0x788001e2,
44 0x71c001c7,
45 0x6b8001ae,
46 0x65400195,
47 0x5fc0017f,
48 0x5a400169,
49 0x55400155,
50 0x50800142,
51 0x4c000130,
52 0x47c0011f,
53 0x43c0010f,
54 0x40000100,
55 0x3c8000f2,
56 0x390000e4,
57 0x35c000d7,
58 0x32c000cb,
59 0x300000c0,
60 0x2d4000b5,
61 0x2ac000ab,
62 0x288000a2,
63 0x26000098,
64 0x24000090,
65 0x22000088,
66 0x20000080,
67 0x1e400079,
68 0x1c800072,
69 0x1b00006c,
70 0x19800066,
71 0x18000060,
72 0x16c0005b,
73 0x15800056,
74 0x14400051,
75 0x1300004c,
76 0x12000048,
77 0x11000044,
78 0x10000040,
79};
80
81static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
82 {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
83 {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
84 {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
85 {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
86 {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
87 {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
88 {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
89 {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
90 {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
91 {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
92 {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
93 {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
94 {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
95 {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
96 {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
97 {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
98 {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
99 {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
100 {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
101 {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
102 {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
103 {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
104 {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
105 {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
106 {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
107 {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
108 {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
109 {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
110 {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
111 {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
112 {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
113 {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
114 {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
115};
116
117static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
118 {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
119 {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
120 {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
121 {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
122 {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
123 {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
124 {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
125 {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
126 {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
127 {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
128 {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
129 {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
130 {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
131 {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
132 {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
133 {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
134 {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
135 {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
136 {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
137 {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
138 {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
139 {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
140 {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
141 {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
142 {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
143 {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
144 {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
145 {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
146 {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
147 {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
148 {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
149 {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
150 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
151};
152
153static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
154{
155 dm_digtable.dig_enable_flag = true;
156 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
157 dm_digtable.cur_igvalue = 0x20;
158 dm_digtable.pre_igvalue = 0x0;
159 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
160 dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
161 dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
162 dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
163 dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
164 dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
165 dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
166 dm_digtable.rx_gain_range_max = DM_DIG_MAX;
167 dm_digtable.rx_gain_range_min = DM_DIG_MIN;
168 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
169 dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
170 dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
171 dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
172 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
173}
174
175static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
176{
177 struct rtl_priv *rtlpriv = rtl_priv(hw);
178 long rssi_val_min = 0;
179
180 if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
181 (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
182 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
183 rssi_val_min =
184 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
185 rtlpriv->dm.undecorated_smoothed_pwdb) ?
186 rtlpriv->dm.undecorated_smoothed_pwdb :
187 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
188 else
189 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
190 } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
191 dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
192 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
193 } else if (dm_digtable.curmultista_connectstate ==
194 DIG_MULTISTA_CONNECT) {
195 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
196 }
197
198 return (u8) rssi_val_min;
199}
200
201static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
202{
203 u32 ret_value;
204 struct rtl_priv *rtlpriv = rtl_priv(hw);
205 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
206
207 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
208 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
209
210 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
211 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
212 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
213
214 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
215 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
216 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
217 falsealm_cnt->cnt_rate_illegal +
218 falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
219
220 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
221 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
222 falsealm_cnt->cnt_cck_fail = ret_value;
223
224 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
225 falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
226 falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
227 falsealm_cnt->cnt_rate_illegal +
228 falsealm_cnt->cnt_crc8_fail +
229 falsealm_cnt->cnt_mcs_fail +
230 falsealm_cnt->cnt_cck_fail);
231
232 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
233 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
234 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
235 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
236
237 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
238 ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
239 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
240 falsealm_cnt->cnt_parity_fail,
241 falsealm_cnt->cnt_rate_illegal,
242 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
243
244 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
245 ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
246 falsealm_cnt->cnt_ofdm_fail,
247 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
248}
249
250static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
251{
252 struct rtl_priv *rtlpriv = rtl_priv(hw);
253 u8 value_igi = dm_digtable.cur_igvalue;
254
255 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
256 value_igi--;
257 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
258 value_igi += 0;
259 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
260 value_igi++;
261 else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
262 value_igi += 2;
263 if (value_igi > DM_DIG_FA_UPPER)
264 value_igi = DM_DIG_FA_UPPER;
265 else if (value_igi < DM_DIG_FA_LOWER)
266 value_igi = DM_DIG_FA_LOWER;
267 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
268 value_igi = 0x32;
269
270 dm_digtable.cur_igvalue = value_igi;
271 rtl92c_dm_write_dig(hw);
272}
273
274static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
275{
276 struct rtl_priv *rtlpriv = rtl_priv(hw);
277
278 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
279 if ((dm_digtable.backoff_val - 2) <
280 dm_digtable.backoff_val_range_min)
281 dm_digtable.backoff_val =
282 dm_digtable.backoff_val_range_min;
283 else
284 dm_digtable.backoff_val -= 2;
285 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
286 if ((dm_digtable.backoff_val + 2) >
287 dm_digtable.backoff_val_range_max)
288 dm_digtable.backoff_val =
289 dm_digtable.backoff_val_range_max;
290 else
291 dm_digtable.backoff_val += 2;
292 }
293
294 if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
295 dm_digtable.rx_gain_range_max)
296 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
297 else if ((dm_digtable.rssi_val_min + 10 -
298 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
299 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
300 else
301 dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
302 dm_digtable.backoff_val;
303
304 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
305 ("rssi_val_min = %x backoff_val %x\n",
306 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
307
308 rtl92c_dm_write_dig(hw);
309}
310
311static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
312{
313 static u8 binitialized; /* initialized to false */
314 struct rtl_priv *rtlpriv = rtl_priv(hw);
315 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
316 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
317 bool b_multi_sta = false;
318
319 if (mac->opmode == NL80211_IFTYPE_ADHOC)
320 b_multi_sta = true;
321
322 if ((b_multi_sta == false) || (dm_digtable.cursta_connectctate !=
323 DIG_STA_DISCONNECT)) {
324 binitialized = false;
325 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
326 return;
327 } else if (binitialized == false) {
328 binitialized = true;
329 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
330 dm_digtable.cur_igvalue = 0x20;
331 rtl92c_dm_write_dig(hw);
332 }
333
334 if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
335 if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
336 (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
337
338 if (dm_digtable.dig_ext_port_stage ==
339 DIG_EXT_PORT_STAGE_2) {
340 dm_digtable.cur_igvalue = 0x20;
341 rtl92c_dm_write_dig(hw);
342 }
343
344 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
345 } else if (rssi_strength > dm_digtable.rssi_highthresh) {
346 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
347 rtl92c_dm_ctrl_initgain_by_fa(hw);
348 }
349 } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
350 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
351 dm_digtable.cur_igvalue = 0x20;
352 rtl92c_dm_write_dig(hw);
353 }
354
355 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
356 ("curmultista_connectstate = "
357 "%x dig_ext_port_stage %x\n",
358 dm_digtable.curmultista_connectstate,
359 dm_digtable.dig_ext_port_stage));
360}
361
362static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
363{
364 struct rtl_priv *rtlpriv = rtl_priv(hw);
365
366 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
367 ("presta_connectstate = %x,"
368 " cursta_connectctate = %x\n",
369 dm_digtable.presta_connectstate,
370 dm_digtable.cursta_connectctate));
371
372 if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
373 || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
374 || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
375
376 if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
377 dm_digtable.rssi_val_min =
378 rtl92c_dm_initial_gain_min_pwdb(hw);
379 rtl92c_dm_ctrl_initgain_by_rssi(hw);
380 }
381 } else {
382 dm_digtable.rssi_val_min = 0;
383 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
384 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
385 dm_digtable.cur_igvalue = 0x20;
386 dm_digtable.pre_igvalue = 0;
387 rtl92c_dm_write_dig(hw);
388 }
389}
390
391static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
392{
393 struct rtl_priv *rtlpriv = rtl_priv(hw);
394 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
395
396 if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
397 dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
398
399 if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
400 if (dm_digtable.rssi_val_min <= 25)
401 dm_digtable.cur_cck_pd_state =
402 CCK_PD_STAGE_LowRssi;
403 else
404 dm_digtable.cur_cck_pd_state =
405 CCK_PD_STAGE_HighRssi;
406 } else {
407 if (dm_digtable.rssi_val_min <= 20)
408 dm_digtable.cur_cck_pd_state =
409 CCK_PD_STAGE_LowRssi;
410 else
411 dm_digtable.cur_cck_pd_state =
412 CCK_PD_STAGE_HighRssi;
413 }
414 } else {
415 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
416 }
417
418 if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
419 if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
420 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
421 dm_digtable.cur_cck_fa_state =
422 CCK_FA_STAGE_High;
423 else
424 dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
425
426 if (dm_digtable.pre_cck_fa_state !=
427 dm_digtable.cur_cck_fa_state) {
428 if (dm_digtable.cur_cck_fa_state ==
429 CCK_FA_STAGE_Low)
430 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
431 0x83);
432 else
433 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
434 0xcd);
435
436 dm_digtable.pre_cck_fa_state =
437 dm_digtable.cur_cck_fa_state;
438 }
439
440 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
441
442 if (IS_92C_SERIAL(rtlhal->version))
443 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
444 MASKBYTE2, 0xd7);
445 } else {
446 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
447 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
448
449 if (IS_92C_SERIAL(rtlhal->version))
450 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
451 MASKBYTE2, 0xd3);
452 }
453 dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
454 }
455
456 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
457 ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
458
459 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
460 ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
461}
462
463static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
464{
465 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
466
467 if (mac->act_scanning == true)
468 return;
469
470 if ((mac->link_state > MAC80211_NOLINK) &&
471 (mac->link_state < MAC80211_LINKED))
472 dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
473 else if (mac->link_state >= MAC80211_LINKED)
474 dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
475 else
476 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
477
478 rtl92c_dm_initial_gain_sta(hw);
479 rtl92c_dm_initial_gain_multi_sta(hw);
480 rtl92c_dm_cck_packet_detection_thresh(hw);
481
482 dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
483
484}
485
486static void rtl92c_dm_dig(struct ieee80211_hw *hw)
487{
488 struct rtl_priv *rtlpriv = rtl_priv(hw);
489
490 if (rtlpriv->dm.b_dm_initialgain_enable == false)
491 return;
492 if (dm_digtable.dig_enable_flag == false)
493 return;
494
495 rtl92c_dm_ctrl_initgain_by_twoport(hw);
496
497}
498
499static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
500{
501 struct rtl_priv *rtlpriv = rtl_priv(hw);
502
503 rtlpriv->dm.bdynamic_txpower_enable = false;
504
505 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
506 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
507}
508
509static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
510{ 41{
511 struct rtl_priv *rtlpriv = rtl_priv(hw); 42 struct rtl_priv *rtlpriv = rtl_priv(hw);
512 struct rtl_phy *rtlphy = &(rtlpriv->phy); 43 struct rtl_phy *rtlphy = &(rtlpriv->phy);
513 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 44 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
514 long undecorated_smoothed_pwdb; 45 long undecorated_smoothed_pwdb;
515 46
516 if (!rtlpriv->dm.bdynamic_txpower_enable) 47 if (!rtlpriv->dm.dynamic_txpower_enable)
517 return; 48 return;
518 49
519 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) { 50 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -584,890 +115,4 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
584 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; 115 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
585} 116}
586 117
587void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
588{
589 struct rtl_priv *rtlpriv = rtl_priv(hw);
590
591 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
592 ("cur_igvalue = 0x%x, "
593 "pre_igvalue = 0x%x, backoff_val = %d\n",
594 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
595 dm_digtable.backoff_val));
596
597 if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
598 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
599 dm_digtable.cur_igvalue);
600 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
601 dm_digtable.cur_igvalue);
602
603 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
604 }
605}
606
607static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
608{
609 struct rtl_priv *rtlpriv = rtl_priv(hw);
610 long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
611
612 u8 h2c_parameter[3] = { 0 };
613
614 return;
615
616 if (tmpentry_max_pwdb != 0) {
617 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
618 tmpentry_max_pwdb;
619 } else {
620 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
621 }
622
623 if (tmpentry_min_pwdb != 0xff) {
624 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
625 tmpentry_min_pwdb;
626 } else {
627 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
628 }
629
630 h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
631 h2c_parameter[0] = 0;
632
633 rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
634}
635
636void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
637{
638 struct rtl_priv *rtlpriv = rtl_priv(hw);
639 rtlpriv->dm.bcurrent_turbo_edca = false;
640 rtlpriv->dm.bis_any_nonbepkts = false;
641 rtlpriv->dm.bis_cur_rdlstate = false;
642}
643
644static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
645{
646 struct rtl_priv *rtlpriv = rtl_priv(hw);
647 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
648 static u64 last_txok_cnt;
649 static u64 last_rxok_cnt;
650 u64 cur_txok_cnt;
651 u64 cur_rxok_cnt;
652 u32 edca_be_ul = 0x5ea42b;
653 u32 edca_be_dl = 0x5ea42b;
654
655 if (mac->opmode == NL80211_IFTYPE_ADHOC)
656 goto dm_checkedcaturbo_exit;
657
658 if (mac->link_state != MAC80211_LINKED) {
659 rtlpriv->dm.bcurrent_turbo_edca = false;
660 return;
661 }
662
663 if (!mac->ht_enable) { /*FIX MERGE */
664 if (!(edca_be_ul & 0xffff0000))
665 edca_be_ul |= 0x005e0000;
666
667 if (!(edca_be_dl & 0xffff0000))
668 edca_be_dl |= 0x005e0000;
669 }
670
671 if ((!rtlpriv->dm.bis_any_nonbepkts) &&
672 (!rtlpriv->dm.b_disable_framebursting)) {
673 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
674 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
675 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
676 if (!rtlpriv->dm.bis_cur_rdlstate ||
677 !rtlpriv->dm.bcurrent_turbo_edca) {
678 rtl_write_dword(rtlpriv,
679 REG_EDCA_BE_PARAM,
680 edca_be_dl);
681 rtlpriv->dm.bis_cur_rdlstate = true;
682 }
683 } else {
684 if (rtlpriv->dm.bis_cur_rdlstate ||
685 !rtlpriv->dm.bcurrent_turbo_edca) {
686 rtl_write_dword(rtlpriv,
687 REG_EDCA_BE_PARAM,
688 edca_be_ul);
689 rtlpriv->dm.bis_cur_rdlstate = false;
690 }
691 }
692 rtlpriv->dm.bcurrent_turbo_edca = true;
693 } else {
694 if (rtlpriv->dm.bcurrent_turbo_edca) {
695 u8 tmp = AC0_BE;
696 rtlpriv->cfg->ops->set_hw_reg(hw,
697 HW_VAR_AC_PARAM,
698 (u8 *) (&tmp));
699 rtlpriv->dm.bcurrent_turbo_edca = false;
700 }
701 }
702
703dm_checkedcaturbo_exit:
704 rtlpriv->dm.bis_any_nonbepkts = false;
705 last_txok_cnt = rtlpriv->stats.txbytesunicast;
706 last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
707}
708
709static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
710 *hw)
711{
712 struct rtl_priv *rtlpriv = rtl_priv(hw);
713 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
714 struct rtl_phy *rtlphy = &(rtlpriv->phy);
715 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
716 u8 thermalvalue, delta, delta_lck, delta_iqk;
717 long ele_a, ele_d, temp_cck, val_x, value32;
718 long val_y, ele_c;
719 u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
720 int i;
721 bool is2t = IS_92C_SERIAL(rtlhal->version);
722 u8 txpwr_level[2] = {0, 0};
723 u8 ofdm_min_index = 6, rf;
724
725 rtlpriv->dm.btxpower_trackingInit = true;
726 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
727 ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
728
729 thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
730
731 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
732 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
733 "eeprom_thermalmeter 0x%x\n",
734 thermalvalue, rtlpriv->dm.thermalvalue,
735 rtlefuse->eeprom_thermalmeter));
736
737 rtl92c_phy_ap_calibrate(hw, (thermalvalue -
738 rtlefuse->eeprom_thermalmeter));
739 if (is2t)
740 rf = 2;
741 else
742 rf = 1;
743
744 if (thermalvalue) {
745 ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
746 MASKDWORD) & MASKOFDM_D;
747
748 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
749 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
750 ofdm_index_old[0] = (u8) i;
751
752 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
753 ("Initial pathA ele_d reg0x%x = 0x%lx, "
754 "ofdm_index=0x%x\n",
755 ROFDM0_XATXIQIMBALANCE,
756 ele_d, ofdm_index_old[0]));
757 break;
758 }
759 }
760
761 if (is2t) {
762 ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
763 MASKDWORD) & MASKOFDM_D;
764
765 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
766 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
767 ofdm_index_old[1] = (u8) i;
768
769 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
770 DBG_LOUD,
771 ("Initial pathB ele_d reg0x%x = "
772 "0x%lx, ofdm_index=0x%x\n",
773 ROFDM0_XBTXIQIMBALANCE, ele_d,
774 ofdm_index_old[1]));
775 break;
776 }
777 }
778 }
779
780 temp_cck =
781 rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
782
783 for (i = 0; i < CCK_TABLE_LENGTH; i++) {
784 if (rtlpriv->dm.b_cck_inch14) {
785 if (memcmp((void *)&temp_cck,
786 (void *)&cckswing_table_ch14[i][2],
787 4) == 0) {
788 cck_index_old = (u8) i;
789
790 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
791 DBG_LOUD,
792 ("Initial reg0x%x = 0x%lx, "
793 "cck_index=0x%x, ch 14 %d\n",
794 RCCK0_TXFILTER2, temp_cck,
795 cck_index_old,
796 rtlpriv->dm.b_cck_inch14));
797 break;
798 }
799 } else {
800 if (memcmp((void *)&temp_cck,
801 (void *)
802 &cckswing_table_ch1ch13[i][2],
803 4) == 0) {
804 cck_index_old = (u8) i;
805 118
806 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
807 DBG_LOUD,
808 ("Initial reg0x%x = 0x%lx, "
809 "cck_index=0x%x, ch14 %d\n",
810 RCCK0_TXFILTER2, temp_cck,
811 cck_index_old,
812 rtlpriv->dm.b_cck_inch14));
813 break;
814 }
815 }
816 }
817
818 if (!rtlpriv->dm.thermalvalue) {
819 rtlpriv->dm.thermalvalue =
820 rtlefuse->eeprom_thermalmeter;
821 rtlpriv->dm.thermalvalue_lck = thermalvalue;
822 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
823 for (i = 0; i < rf; i++)
824 rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
825 rtlpriv->dm.cck_index = cck_index_old;
826 }
827
828 delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
829 (thermalvalue - rtlpriv->dm.thermalvalue) :
830 (rtlpriv->dm.thermalvalue - thermalvalue);
831
832 delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
833 (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
834 (rtlpriv->dm.thermalvalue_lck - thermalvalue);
835
836 delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
837 (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
838 (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
839
840 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
841 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
842 "eeprom_thermalmeter 0x%x delta 0x%x "
843 "delta_lck 0x%x delta_iqk 0x%x\n",
844 thermalvalue, rtlpriv->dm.thermalvalue,
845 rtlefuse->eeprom_thermalmeter, delta, delta_lck,
846 delta_iqk));
847
848 if (delta_lck > 1) {
849 rtlpriv->dm.thermalvalue_lck = thermalvalue;
850 rtl92c_phy_lc_calibrate(hw);
851 }
852
853 if (delta > 0 && rtlpriv->dm.txpower_track_control) {
854 if (thermalvalue > rtlpriv->dm.thermalvalue) {
855 for (i = 0; i < rf; i++)
856 rtlpriv->dm.ofdm_index[i] -= delta;
857 rtlpriv->dm.cck_index -= delta;
858 } else {
859 for (i = 0; i < rf; i++)
860 rtlpriv->dm.ofdm_index[i] += delta;
861 rtlpriv->dm.cck_index += delta;
862 }
863
864 if (is2t) {
865 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
866 ("temp OFDM_A_index=0x%x, "
867 "OFDM_B_index=0x%x,"
868 "cck_index=0x%x\n",
869 rtlpriv->dm.ofdm_index[0],
870 rtlpriv->dm.ofdm_index[1],
871 rtlpriv->dm.cck_index));
872 } else {
873 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
874 ("temp OFDM_A_index=0x%x,"
875 "cck_index=0x%x\n",
876 rtlpriv->dm.ofdm_index[0],
877 rtlpriv->dm.cck_index));
878 }
879
880 if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
881 for (i = 0; i < rf; i++)
882 ofdm_index[i] =
883 rtlpriv->dm.ofdm_index[i]
884 + 1;
885 cck_index = rtlpriv->dm.cck_index + 1;
886 } else {
887 for (i = 0; i < rf; i++)
888 ofdm_index[i] =
889 rtlpriv->dm.ofdm_index[i];
890 cck_index = rtlpriv->dm.cck_index;
891 }
892
893 for (i = 0; i < rf; i++) {
894 if (txpwr_level[i] >= 0 &&
895 txpwr_level[i] <= 26) {
896 if (thermalvalue >
897 rtlefuse->eeprom_thermalmeter) {
898 if (delta < 5)
899 ofdm_index[i] -= 1;
900
901 else
902 ofdm_index[i] -= 2;
903 } else if (delta > 5 && thermalvalue <
904 rtlefuse->
905 eeprom_thermalmeter) {
906 ofdm_index[i] += 1;
907 }
908 } else if (txpwr_level[i] >= 27 &&
909 txpwr_level[i] <= 32
910 && thermalvalue >
911 rtlefuse->eeprom_thermalmeter) {
912 if (delta < 5)
913 ofdm_index[i] -= 1;
914
915 else
916 ofdm_index[i] -= 2;
917 } else if (txpwr_level[i] >= 32 &&
918 txpwr_level[i] <= 38 &&
919 thermalvalue >
920 rtlefuse->eeprom_thermalmeter
921 && delta > 5) {
922 ofdm_index[i] -= 1;
923 }
924 }
925
926 if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
927 if (thermalvalue >
928 rtlefuse->eeprom_thermalmeter) {
929 if (delta < 5)
930 cck_index -= 1;
931
932 else
933 cck_index -= 2;
934 } else if (delta > 5 && thermalvalue <
935 rtlefuse->eeprom_thermalmeter) {
936 cck_index += 1;
937 }
938 } else if (txpwr_level[i] >= 27 &&
939 txpwr_level[i] <= 32 &&
940 thermalvalue >
941 rtlefuse->eeprom_thermalmeter) {
942 if (delta < 5)
943 cck_index -= 1;
944
945 else
946 cck_index -= 2;
947 } else if (txpwr_level[i] >= 32 &&
948 txpwr_level[i] <= 38 &&
949 thermalvalue > rtlefuse->eeprom_thermalmeter
950 && delta > 5) {
951 cck_index -= 1;
952 }
953
954 for (i = 0; i < rf; i++) {
955 if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
956 ofdm_index[i] = OFDM_TABLE_SIZE - 1;
957
958 else if (ofdm_index[i] < ofdm_min_index)
959 ofdm_index[i] = ofdm_min_index;
960 }
961
962 if (cck_index > CCK_TABLE_SIZE - 1)
963 cck_index = CCK_TABLE_SIZE - 1;
964 else if (cck_index < 0)
965 cck_index = 0;
966
967 if (is2t) {
968 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
969 ("new OFDM_A_index=0x%x, "
970 "OFDM_B_index=0x%x,"
971 "cck_index=0x%x\n",
972 ofdm_index[0], ofdm_index[1],
973 cck_index));
974 } else {
975 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
976 ("new OFDM_A_index=0x%x,"
977 "cck_index=0x%x\n",
978 ofdm_index[0], cck_index));
979 }
980 }
981
982 if (rtlpriv->dm.txpower_track_control && delta != 0) {
983 ele_d =
984 (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
985 val_x = rtlphy->reg_e94;
986 val_y = rtlphy->reg_e9c;
987
988 if (val_x != 0) {
989 if ((val_x & 0x00000200) != 0)
990 val_x = val_x | 0xFFFFFC00;
991 ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
992
993 if ((val_y & 0x00000200) != 0)
994 val_y = val_y | 0xFFFFFC00;
995 ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
996
997 value32 = (ele_d << 22) |
998 ((ele_c & 0x3F) << 16) | ele_a;
999
1000 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1001 MASKDWORD, value32);
1002
1003 value32 = (ele_c & 0x000003C0) >> 6;
1004 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
1005 value32);
1006
1007 value32 = ((val_x * ele_d) >> 7) & 0x01;
1008 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1009 BIT(31), value32);
1010
1011 value32 = ((val_y * ele_d) >> 7) & 0x01;
1012 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1013 BIT(29), value32);
1014 } else {
1015 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1016 MASKDWORD,
1017 ofdmswing_table[ofdm_index[0]]);
1018
1019 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
1020 0x00);
1021 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1022 BIT(31) | BIT(29), 0x00);
1023 }
1024
1025 if (!rtlpriv->dm.b_cck_inch14) {
1026 rtl_write_byte(rtlpriv, 0xa22,
1027 cckswing_table_ch1ch13[cck_index]
1028 [0]);
1029 rtl_write_byte(rtlpriv, 0xa23,
1030 cckswing_table_ch1ch13[cck_index]
1031 [1]);
1032 rtl_write_byte(rtlpriv, 0xa24,
1033 cckswing_table_ch1ch13[cck_index]
1034 [2]);
1035 rtl_write_byte(rtlpriv, 0xa25,
1036 cckswing_table_ch1ch13[cck_index]
1037 [3]);
1038 rtl_write_byte(rtlpriv, 0xa26,
1039 cckswing_table_ch1ch13[cck_index]
1040 [4]);
1041 rtl_write_byte(rtlpriv, 0xa27,
1042 cckswing_table_ch1ch13[cck_index]
1043 [5]);
1044 rtl_write_byte(rtlpriv, 0xa28,
1045 cckswing_table_ch1ch13[cck_index]
1046 [6]);
1047 rtl_write_byte(rtlpriv, 0xa29,
1048 cckswing_table_ch1ch13[cck_index]
1049 [7]);
1050 } else {
1051 rtl_write_byte(rtlpriv, 0xa22,
1052 cckswing_table_ch14[cck_index]
1053 [0]);
1054 rtl_write_byte(rtlpriv, 0xa23,
1055 cckswing_table_ch14[cck_index]
1056 [1]);
1057 rtl_write_byte(rtlpriv, 0xa24,
1058 cckswing_table_ch14[cck_index]
1059 [2]);
1060 rtl_write_byte(rtlpriv, 0xa25,
1061 cckswing_table_ch14[cck_index]
1062 [3]);
1063 rtl_write_byte(rtlpriv, 0xa26,
1064 cckswing_table_ch14[cck_index]
1065 [4]);
1066 rtl_write_byte(rtlpriv, 0xa27,
1067 cckswing_table_ch14[cck_index]
1068 [5]);
1069 rtl_write_byte(rtlpriv, 0xa28,
1070 cckswing_table_ch14[cck_index]
1071 [6]);
1072 rtl_write_byte(rtlpriv, 0xa29,
1073 cckswing_table_ch14[cck_index]
1074 [7]);
1075 }
1076
1077 if (is2t) {
1078 ele_d = (ofdmswing_table[ofdm_index[1]] &
1079 0xFFC00000) >> 22;
1080
1081 val_x = rtlphy->reg_eb4;
1082 val_y = rtlphy->reg_ebc;
1083
1084 if (val_x != 0) {
1085 if ((val_x & 0x00000200) != 0)
1086 val_x = val_x | 0xFFFFFC00;
1087 ele_a = ((val_x * ele_d) >> 8) &
1088 0x000003FF;
1089
1090 if ((val_y & 0x00000200) != 0)
1091 val_y = val_y | 0xFFFFFC00;
1092 ele_c = ((val_y * ele_d) >> 8) &
1093 0x00003FF;
1094
1095 value32 = (ele_d << 22) |
1096 ((ele_c & 0x3F) << 16) | ele_a;
1097 rtl_set_bbreg(hw,
1098 ROFDM0_XBTXIQIMBALANCE,
1099 MASKDWORD, value32);
1100
1101 value32 = (ele_c & 0x000003C0) >> 6;
1102 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1103 MASKH4BITS, value32);
1104
1105 value32 = ((val_x * ele_d) >> 7) & 0x01;
1106 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1107 BIT(27), value32);
1108
1109 value32 = ((val_y * ele_d) >> 7) & 0x01;
1110 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1111 BIT(25), value32);
1112 } else {
1113 rtl_set_bbreg(hw,
1114 ROFDM0_XBTXIQIMBALANCE,
1115 MASKDWORD,
1116 ofdmswing_table[ofdm_index
1117 [1]]);
1118 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1119 MASKH4BITS, 0x00);
1120 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1121 BIT(27) | BIT(25), 0x00);
1122 }
1123
1124 }
1125 }
1126
1127 if (delta_iqk > 3) {
1128 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
1129 rtl92c_phy_iq_calibrate(hw, false);
1130 }
1131
1132 if (rtlpriv->dm.txpower_track_control)
1133 rtlpriv->dm.thermalvalue = thermalvalue;
1134 }
1135
1136 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
1137
1138}
1139
1140static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
1141 struct ieee80211_hw *hw)
1142{
1143 struct rtl_priv *rtlpriv = rtl_priv(hw);
1144
1145 rtlpriv->dm.btxpower_tracking = true;
1146 rtlpriv->dm.btxpower_trackingInit = false;
1147
1148 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1149 ("pMgntInfo->btxpower_tracking = %d\n",
1150 rtlpriv->dm.btxpower_tracking));
1151}
1152
1153static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
1154{
1155 rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
1156}
1157
1158static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
1159{
1160 rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
1161}
1162
1163static void rtl92c_dm_check_txpower_tracking_thermal_meter(
1164 struct ieee80211_hw *hw)
1165{
1166 struct rtl_priv *rtlpriv = rtl_priv(hw);
1167 static u8 tm_trigger;
1168
1169 if (!rtlpriv->dm.btxpower_tracking)
1170 return;
1171
1172 if (!tm_trigger) {
1173 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
1174 0x60);
1175 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1176 ("Trigger 92S Thermal Meter!!\n"));
1177 tm_trigger = 1;
1178 return;
1179 } else {
1180 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1181 ("Schedule TxPowerTracking direct call!!\n"));
1182 rtl92c_dm_txpower_tracking_directcall(hw);
1183 tm_trigger = 0;
1184 }
1185}
1186
1187void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
1188{
1189 rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
1190}
1191
1192void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1193{
1194 struct rtl_priv *rtlpriv = rtl_priv(hw);
1195 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1196
1197 p_ra->ratr_state = DM_RATR_STA_INIT;
1198 p_ra->pre_ratr_state = DM_RATR_STA_INIT;
1199
1200 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
1201 rtlpriv->dm.b_useramask = true;
1202 else
1203 rtlpriv->dm.b_useramask = false;
1204
1205}
1206
1207static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1208{
1209 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1211 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1212 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1213 u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
1214
1215 if (is_hal_stop(rtlhal)) {
1216 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1217 ("<---- driver is going to unload\n"));
1218 return;
1219 }
1220
1221 if (!rtlpriv->dm.b_useramask) {
1222 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1223 ("<---- driver does not control rate adaptive mask\n"));
1224 return;
1225 }
1226
1227 if (mac->link_state == MAC80211_LINKED) {
1228
1229 switch (p_ra->pre_ratr_state) {
1230 case DM_RATR_STA_HIGH:
1231 high_rssithresh_for_ra = 50;
1232 low_rssithresh_for_ra = 20;
1233 break;
1234 case DM_RATR_STA_MIDDLE:
1235 high_rssithresh_for_ra = 55;
1236 low_rssithresh_for_ra = 20;
1237 break;
1238 case DM_RATR_STA_LOW:
1239 high_rssithresh_for_ra = 50;
1240 low_rssithresh_for_ra = 25;
1241 break;
1242 default:
1243 high_rssithresh_for_ra = 50;
1244 low_rssithresh_for_ra = 20;
1245 break;
1246 }
1247
1248 if (rtlpriv->dm.undecorated_smoothed_pwdb >
1249 (long)high_rssithresh_for_ra)
1250 p_ra->ratr_state = DM_RATR_STA_HIGH;
1251 else if (rtlpriv->dm.undecorated_smoothed_pwdb >
1252 (long)low_rssithresh_for_ra)
1253 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
1254 else
1255 p_ra->ratr_state = DM_RATR_STA_LOW;
1256
1257 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
1258 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1259 ("RSSI = %ld\n",
1260 rtlpriv->dm.undecorated_smoothed_pwdb));
1261 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1262 ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
1263 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1264 ("PreState = %d, CurState = %d\n",
1265 p_ra->pre_ratr_state, p_ra->ratr_state));
1266
1267 rtlpriv->cfg->ops->update_rate_mask(hw,
1268 p_ra->ratr_state);
1269
1270 p_ra->pre_ratr_state = p_ra->ratr_state;
1271 }
1272 }
1273}
1274
1275static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1276{
1277 dm_pstable.pre_ccastate = CCA_MAX;
1278 dm_pstable.cur_ccasate = CCA_MAX;
1279 dm_pstable.pre_rfstate = RF_MAX;
1280 dm_pstable.cur_rfstate = RF_MAX;
1281 dm_pstable.rssi_val_min = 0;
1282}
1283
1284static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
1285{
1286 struct rtl_priv *rtlpriv = rtl_priv(hw);
1287 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1288
1289 if (dm_pstable.rssi_val_min != 0) {
1290 if (dm_pstable.pre_ccastate == CCA_2R) {
1291 if (dm_pstable.rssi_val_min >= 35)
1292 dm_pstable.cur_ccasate = CCA_1R;
1293 else
1294 dm_pstable.cur_ccasate = CCA_2R;
1295 } else {
1296 if (dm_pstable.rssi_val_min <= 30)
1297 dm_pstable.cur_ccasate = CCA_2R;
1298 else
1299 dm_pstable.cur_ccasate = CCA_1R;
1300 }
1301 } else {
1302 dm_pstable.cur_ccasate = CCA_MAX;
1303 }
1304
1305 if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
1306 if (dm_pstable.cur_ccasate == CCA_1R) {
1307 if (get_rf_type(rtlphy) == RF_2T2R) {
1308 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1309 MASKBYTE0, 0x13);
1310 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
1311 } else {
1312 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1313 MASKBYTE0, 0x23);
1314 rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
1315 }
1316 } else {
1317 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
1318 0x33);
1319 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
1320 }
1321 dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
1322 }
1323
1324 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
1325 (dm_pstable.cur_ccasate ==
1326 0) ? "1RCCA" : "2RCCA"));
1327}
1328
1329void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1330{
1331 static u8 initialize;
1332 static u32 reg_874, reg_c70, reg_85c, reg_a74;
1333
1334 if (initialize == 0) {
1335 reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1336 MASKDWORD) & 0x1CC000) >> 14;
1337
1338 reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
1339 MASKDWORD) & BIT(3)) >> 3;
1340
1341 reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1342 MASKDWORD) & 0xFF000000) >> 24;
1343
1344 reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
1345
1346 initialize = 1;
1347 }
1348
1349 if (!bforce_in_normal) {
1350 if (dm_pstable.rssi_val_min != 0) {
1351 if (dm_pstable.pre_rfstate == RF_NORMAL) {
1352 if (dm_pstable.rssi_val_min >= 30)
1353 dm_pstable.cur_rfstate = RF_SAVE;
1354 else
1355 dm_pstable.cur_rfstate = RF_NORMAL;
1356 } else {
1357 if (dm_pstable.rssi_val_min <= 25)
1358 dm_pstable.cur_rfstate = RF_NORMAL;
1359 else
1360 dm_pstable.cur_rfstate = RF_SAVE;
1361 }
1362 } else {
1363 dm_pstable.cur_rfstate = RF_MAX;
1364 }
1365 } else {
1366 dm_pstable.cur_rfstate = RF_NORMAL;
1367 }
1368
1369 if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
1370 if (dm_pstable.cur_rfstate == RF_SAVE) {
1371 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1372 0x1C0000, 0x2);
1373 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
1374 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1375 0xFF000000, 0x63);
1376 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1377 0xC000, 0x2);
1378 rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
1379 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1380 rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
1381 } else {
1382 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1383 0x1CC000, reg_874);
1384 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
1385 reg_c70);
1386 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
1387 reg_85c);
1388 rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
1389 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1390 }
1391
1392 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
1393 }
1394}
1395
1396static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1397{
1398 struct rtl_priv *rtlpriv = rtl_priv(hw);
1399 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1400 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1401
1402 if (((mac->link_state == MAC80211_NOLINK)) &&
1403 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
1404 dm_pstable.rssi_val_min = 0;
1405 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1406 ("Not connected to any\n"));
1407 }
1408
1409 if (mac->link_state == MAC80211_LINKED) {
1410 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1411 dm_pstable.rssi_val_min =
1412 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1413 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1414 ("AP Client PWDB = 0x%lx\n",
1415 dm_pstable.rssi_val_min));
1416 } else {
1417 dm_pstable.rssi_val_min =
1418 rtlpriv->dm.undecorated_smoothed_pwdb;
1419 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1420 ("STA Default Port PWDB = 0x%lx\n",
1421 dm_pstable.rssi_val_min));
1422 }
1423 } else {
1424 dm_pstable.rssi_val_min =
1425 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1426
1427 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1428 ("AP Ext Port PWDB = 0x%lx\n",
1429 dm_pstable.rssi_val_min));
1430 }
1431
1432 if (IS_92C_SERIAL(rtlhal->version))
1433 rtl92c_dm_1r_cca(hw);
1434}
1435
1436void rtl92c_dm_init(struct ieee80211_hw *hw)
1437{
1438 struct rtl_priv *rtlpriv = rtl_priv(hw);
1439
1440 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
1441 rtl92c_dm_diginit(hw);
1442 rtl92c_dm_init_dynamic_txpower(hw);
1443 rtl92c_dm_init_edca_turbo(hw);
1444 rtl92c_dm_init_rate_adaptive_mask(hw);
1445 rtl92c_dm_initialize_txpower_tracking(hw);
1446 rtl92c_dm_init_dynamic_bb_powersaving(hw);
1447}
1448
1449void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1450{
1451 struct rtl_priv *rtlpriv = rtl_priv(hw);
1452 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1453 bool b_fw_current_inpsmode = false;
1454 bool b_fw_ps_awake = true;
1455
1456 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
1457 (u8 *) (&b_fw_current_inpsmode));
1458 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
1459 (u8 *) (&b_fw_ps_awake));
1460
1461 if ((ppsc->rfpwr_state == ERFON) && ((!b_fw_current_inpsmode) &&
1462 b_fw_ps_awake)
1463 && (!ppsc->rfchange_inprogress)) {
1464 rtl92c_dm_pwdb_monitor(hw);
1465 rtl92c_dm_dig(hw);
1466 rtl92c_dm_false_alarm_counter_statistics(hw);
1467 rtl92c_dm_dynamic_bb_powersaving(hw);
1468 rtl92c_dm_dynamic_txpower(hw);
1469 rtl92c_dm_check_txpower_tracking(hw);
1470 rtl92c_dm_refresh_rate_adaptive_mask(hw);
1471 rtl92c_dm_check_edca_turbo(hw);
1472 }
1473}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 463439e4074..5911d52a24a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -192,5 +192,6 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw); 192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); 193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); 194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
195void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
195 196
196#endif 197#endif
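[editor's note] rtl92c_dm_watchdog() above only runs the dynamic-mechanism routines when the RF is powered (ERFON), the firmware is not in power-save, the firmware reports the RF awake, and no RF change is in progress. A compact sketch of that gating pattern under those assumptions; the struct, typedef and routine table are hypothetical stand-ins for the individual rtl92c_dm_* calls:

#include <stdbool.h>

/* Hypothetical context; the real driver reads these via get_hw_reg(). */
struct dm_state {
	bool rf_on;                /* rfpwr_state == ERFON      */
	bool fw_in_ps;             /* HW_VAR_FW_PSMODE_STATUS   */
	bool fw_ps_awake;          /* HW_VAR_FWLPS_RF_ON        */
	bool rfchange_inprogress;
};

typedef void (*dm_routine_t)(void *hw);

/* Run the periodic dynamic mechanisms only while the radio is usable. */
static void dm_watchdog(void *hw, const struct dm_state *s,
			const dm_routine_t *routines, int n)
{
	int i;

	if (!s->rf_on || s->fw_in_ps || !s->fw_ps_awake ||
	    s->rfchange_inprogress)
		return;

	for (i = 0; i < n; i++)
		routines[i](hw);
}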
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
index 11dd22b987e..11c8bdb4af5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
@@ -133,17 +133,15 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
133{ 133{
134 struct rtl_priv *rtlpriv = rtl_priv(hw); 134 struct rtl_priv *rtlpriv = rtl_priv(hw);
135 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 135 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
136 bool is_version_b;
137 u8 *bufferPtr = (u8 *) buffer; 136 u8 *bufferPtr = (u8 *) buffer;
138 137
139 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size)); 138 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
140 139
141 is_version_b = IS_CHIP_VER_B(version); 140 if (IS_CHIP_VER_B(version)) {
142 if (is_version_b) {
143 u32 pageNums, remainSize; 141 u32 pageNums, remainSize;
144 u32 page, offset; 142 u32 page, offset;
145 143
146 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) 144 if (IS_HARDWARE_TYPE_8192CE(rtlhal))
147 _rtl92c_fill_dummy(bufferPtr, &size); 145 _rtl92c_fill_dummy(bufferPtr, &size);
148 146
149 pageNums = size / FW_8192C_PAGE_SIZE; 147 pageNums = size / FW_8192C_PAGE_SIZE;
@@ -231,14 +229,14 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
231 u32 fwsize; 229 u32 fwsize;
232 int err; 230 int err;
233 enum version_8192c version = rtlhal->version; 231 enum version_8192c version = rtlhal->version;
232 const struct firmware *firmware;
234 233
235 const struct firmware *firmware = NULL; 234 printk(KERN_INFO "rtl8192cu: Loading firmware file %s\n",
236 235 rtlpriv->cfg->fw_name);
237 err = request_firmware(&firmware, rtlpriv->cfg->fw_name, 236 err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
238 rtlpriv->io.dev); 237 rtlpriv->io.dev);
239 if (err) { 238 if (err) {
240 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 239 printk(KERN_ERR "rtl8192cu: Firmware loading failed\n");
241 ("Failed to request firmware!\n"));
242 return 1; 240 return 1;
243 } 241 }
244 242
@@ -318,12 +316,12 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
318 316
319 while (true) { 317 while (true) {
320 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); 318 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
321 if (rtlhal->b_h2c_setinprogress) { 319 if (rtlhal->h2c_setinprogress) {
322 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, 320 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
323 ("H2C set in progress! Wait to set.." 321 ("H2C set in progress! Wait to set.."
324 "element_id(%d).\n", element_id)); 322 "element_id(%d).\n", element_id));
325 323
326 while (rtlhal->b_h2c_setinprogress) { 324 while (rtlhal->h2c_setinprogress) {
327 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, 325 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
328 flag); 326 flag);
329 h2c_waitcounter++; 327 h2c_waitcounter++;
@@ -339,7 +337,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
339 } 337 }
340 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); 338 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
341 } else { 339 } else {
342 rtlhal->b_h2c_setinprogress = true; 340 rtlhal->h2c_setinprogress = true;
343 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); 341 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
344 break; 342 break;
345 } 343 }
@@ -495,7 +493,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
495 } 493 }
496 494
497 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); 495 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
498 rtlhal->b_h2c_setinprogress = false; 496 rtlhal->h2c_setinprogress = false;
499 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); 497 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
500 498
501 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n")); 499 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
@@ -507,7 +505,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
507 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 505 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
508 u32 tmp_cmdbuf[2]; 506 u32 tmp_cmdbuf[2];
509 507
510 if (rtlhal->bfw_ready == false) { 508 if (rtlhal->fw_ready == false) {
511 RT_ASSERT(false, ("return H2C cmd because of Fw " 509 RT_ASSERT(false, ("return H2C cmd because of Fw "
512 "download fail!!!\n")); 510 "download fail!!!\n"));
513 return; 511 return;
@@ -560,39 +558,6 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
560 558
561} 559}
562 560
563static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
564 struct sk_buff *skb)
565{
566 struct rtl_priv *rtlpriv = rtl_priv(hw);
567 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
568 struct rtl8192_tx_ring *ring;
569 struct rtl_tx_desc *pdesc;
570 u8 own;
571 unsigned long flags;
572 struct sk_buff *pskb = NULL;
573
574 ring = &rtlpci->tx_ring[BEACON_QUEUE];
575
576 pskb = __skb_dequeue(&ring->queue);
577 if (pskb)
578 kfree_skb(pskb);
579
580 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
581
582 pdesc = &ring->desc[0];
583 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
584
585 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
586
587 __skb_queue_tail(&ring->queue, skb);
588
589 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
590
591 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
592
593 return true;
594}
595
596#define BEACON_PG 0 /*->1*/ 561#define BEACON_PG 0 /*->1*/
597#define PSPOLL_PG 2 562#define PSPOLL_PG 2
598#define NULL_PG 3 563#define NULL_PG 3
@@ -776,7 +741,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
776 memcpy((u8 *) skb_put(skb, totalpacketlen), 741 memcpy((u8 *) skb_put(skb, totalpacketlen),
777 &reserved_page_packet, totalpacketlen); 742 &reserved_page_packet, totalpacketlen);
778 743
779 rtstatus = _rtl92c_cmd_send_packet(hw, skb); 744 rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb);
780 745
781 if (rtstatus) 746 if (rtstatus)
782 b_dlok = true; 747 b_dlok = true;
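[editor's note] The fw.c hunk above drops the PCI-only _rtl92c_cmd_send_packet() and calls rtlpriv->cfg->ops->cmd_send_packet() instead, so the shared firmware code no longer hard-codes a PCI beacon-queue TX ring and each bus back end can supply its own transmit path. A minimal sketch of that ops-table indirection, with hypothetical type and field names:

#include <stdbool.h>

struct sk_buff;			/* opaque here */

/* Bus-specific hook table; names are illustrative, not the driver's. */
struct hal_ops {
	bool (*cmd_send_packet)(void *hw, struct sk_buff *skb);
};

struct hal_ctx {
	void *hw;
	const struct hal_ops *ops;
};

/*
 * Bus-agnostic caller: the shared firmware code only sees the callback,
 * so PCI and USB back ends plug in their own transmit implementation.
 */
static bool send_reserved_page(struct hal_ctx *ctx, struct sk_buff *skb)
{
	return ctx->ops->cmd_send_packet(ctx->hw, skb);
}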
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 1c41a0c9350..0b910921e60 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -124,7 +124,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
124 break; 124 break;
125 } 125 }
126 case HW_VAR_FW_PSMODE_STATUS: 126 case HW_VAR_FW_PSMODE_STATUS:
127 *((bool *) (val)) = ppsc->b_fw_current_inpsmode; 127 *((bool *) (val)) = ppsc->fw_current_inpsmode;
128 break; 128 break;
129 case HW_VAR_CORRECT_TSF:{ 129 case HW_VAR_CORRECT_TSF:{
130 u64 tsf; 130 u64 tsf;
@@ -173,15 +173,15 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
173 break; 173 break;
174 } 174 }
175 case HW_VAR_BASIC_RATE:{ 175 case HW_VAR_BASIC_RATE:{
176 u16 b_rate_cfg = ((u16 *) val)[0]; 176 u16 rate_cfg = ((u16 *) val)[0];
177 u8 rate_index = 0; 177 u8 rate_index = 0;
178 b_rate_cfg = b_rate_cfg & 0x15f; 178 rate_cfg &= 0x15f;
179 b_rate_cfg |= 0x01; 179 rate_cfg |= 0x01;
180 rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); 180 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
181 rtl_write_byte(rtlpriv, REG_RRSR + 1, 181 rtl_write_byte(rtlpriv, REG_RRSR + 1,
182 (b_rate_cfg >> 8)&0xff); 182 (rate_cfg >> 8)&0xff);
183 while (b_rate_cfg > 0x1) { 183 while (rate_cfg > 0x1) {
184 b_rate_cfg = (b_rate_cfg >> 1); 184 rate_cfg = (rate_cfg >> 1);
185 rate_index++; 185 rate_index++;
186 } 186 }
187 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 187 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
@@ -318,15 +318,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
318 } 318 }
319 case HW_VAR_AC_PARAM:{ 319 case HW_VAR_AC_PARAM:{
320 u8 e_aci = *((u8 *) val); 320 u8 e_aci = *((u8 *) val);
321 u32 u4b_ac_param = 0; 321 u32 u4b_ac_param;
322 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
323 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
324 u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
322 325
323 u4b_ac_param |= (u32) mac->ac[e_aci].aifs; 326 u4b_ac_param = (u32) mac->ac[e_aci].aifs;
324 u4b_ac_param |= ((u32) mac->ac[e_aci].cw_min 327 u4b_ac_param |= ((u32)cw_min
325 & 0xF) << AC_PARAM_ECW_MIN_OFFSET; 328 & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
326 u4b_ac_param |= ((u32) mac->ac[e_aci].cw_max & 329 u4b_ac_param |= ((u32)cw_max &
327 0xF) << AC_PARAM_ECW_MAX_OFFSET; 330 0xF) << AC_PARAM_ECW_MAX_OFFSET;
328 u4b_ac_param |= (u32) mac->ac[e_aci].tx_op 331 u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET;
329 << AC_PARAM_TXOP_LIMIT_OFFSET;
330 332
331 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, 333 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
332 ("queue:%x, ac_param:%x\n", e_aci, 334 ("queue:%x, ac_param:%x\n", e_aci,
@@ -469,12 +471,12 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
469 break; 471 break;
470 } 472 }
471 case HW_VAR_FW_PSMODE_STATUS: 473 case HW_VAR_FW_PSMODE_STATUS:
472 ppsc->b_fw_current_inpsmode = *((bool *) val); 474 ppsc->fw_current_inpsmode = *((bool *) val);
473 break; 475 break;
474 case HW_VAR_H2C_FW_JOINBSSRPT:{ 476 case HW_VAR_H2C_FW_JOINBSSRPT:{
475 u8 mstatus = (*(u8 *) val); 477 u8 mstatus = (*(u8 *) val);
476 u8 tmp_regcr, tmp_reg422; 478 u8 tmp_regcr, tmp_reg422;
477 bool b_recover = false; 479 bool recover = false;
478 480
479 if (mstatus == RT_MEDIA_CONNECT) { 481 if (mstatus == RT_MEDIA_CONNECT) {
480 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, 482 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
@@ -491,7 +493,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
491 rtl_read_byte(rtlpriv, 493 rtl_read_byte(rtlpriv,
492 REG_FWHW_TXQ_CTRL + 2); 494 REG_FWHW_TXQ_CTRL + 2);
493 if (tmp_reg422 & BIT(6)) 495 if (tmp_reg422 & BIT(6))
494 b_recover = true; 496 recover = true;
495 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, 497 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
496 tmp_reg422 & (~BIT(6))); 498 tmp_reg422 & (~BIT(6)));
497 499
@@ -500,7 +502,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
500 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); 502 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
501 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); 503 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
502 504
503 if (b_recover) { 505 if (recover) {
504 rtl_write_byte(rtlpriv, 506 rtl_write_byte(rtlpriv,
505 REG_FWHW_TXQ_CTRL + 2, 507 REG_FWHW_TXQ_CTRL + 2,
506 tmp_reg422); 508 tmp_reg422);
@@ -868,7 +870,7 @@ static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw)
868 rtl_write_word(rtlpriv, 0x350, 0x870c); 870 rtl_write_word(rtlpriv, 0x350, 0x870c);
869 rtl_write_byte(rtlpriv, 0x352, 0x1); 871 rtl_write_byte(rtlpriv, 0x352, 0x1);
870 872
871 if (ppsc->b_support_backdoor) 873 if (ppsc->support_backdoor)
872 rtl_write_byte(rtlpriv, 0x349, 0x1b); 874 rtl_write_byte(rtlpriv, 0x349, 0x1b);
873 else 875 else
874 rtl_write_byte(rtlpriv, 0x349, 0x03); 876 rtl_write_byte(rtlpriv, 0x349, 0x03);
@@ -940,10 +942,10 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
940 ("Failed to download FW. Init HW " 942 ("Failed to download FW. Init HW "
941 "without FW now..\n")); 943 "without FW now..\n"));
942 err = 1; 944 err = 1;
943 rtlhal->bfw_ready = false; 945 rtlhal->fw_ready = false;
944 return err; 946 return err;
945 } else { 947 } else {
946 rtlhal->bfw_ready = true; 948 rtlhal->fw_ready = true;
947 } 949 }
948 950
949 rtlhal->last_hmeboxnum = 0; 951 rtlhal->last_hmeboxnum = 0;
@@ -1170,21 +1172,20 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
1170{ 1172{
1171 struct rtl_priv *rtlpriv = rtl_priv(hw); 1173 struct rtl_priv *rtlpriv = rtl_priv(hw);
1172 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1174 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1173
1174 u32 u4b_ac_param; 1175 u32 u4b_ac_param;
1176 u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min);
1177 u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max);
1178 u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op);
1175 1179
1176 rtl92c_dm_init_edca_turbo(hw); 1180 rtl92c_dm_init_edca_turbo(hw);
1177
1178 u4b_ac_param = (u32) mac->ac[aci].aifs; 1181 u4b_ac_param = (u32) mac->ac[aci].aifs;
1179 u4b_ac_param |= 1182 u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET);
1180 ((u32) mac->ac[aci].cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET; 1183 u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET);
1181 u4b_ac_param |= 1184 u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET);
1182 ((u32) mac->ac[aci].cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
1183 u4b_ac_param |= (u32) mac->ac[aci].tx_op << AC_PARAM_TXOP_LIMIT_OFFSET;
1184 RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG, 1185 RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
1185 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n", 1186 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
1186 aci, u4b_ac_param, mac->ac[aci].aifs, mac->ac[aci].cw_min, 1187 aci, u4b_ac_param, mac->ac[aci].aifs, cw_min,
1187 mac->ac[aci].cw_max, mac->ac[aci].tx_op)); 1188 cw_max, tx_op));
1188 switch (aci) { 1189 switch (aci) {
1189 case AC1_BK: 1190 case AC1_BK:
1190 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param); 1191 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
@@ -1237,7 +1238,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
1237 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); 1238 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
1238 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); 1239 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
1239 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0); 1240 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
1240 if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->bfw_ready) 1241 if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
1241 rtl92c_firmware_selfreset(hw); 1242 rtl92c_firmware_selfreset(hw);
1242 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51); 1243 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
1243 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); 1244 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
@@ -1335,19 +1336,6 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
1335 rtl92ce_enable_interrupt(hw); 1336 rtl92ce_enable_interrupt(hw);
1336} 1337}
1337 1338
1338static u8 _rtl92c_get_chnl_group(u8 chnl)
1339{
1340 u8 group;
1341
1342 if (chnl < 3)
1343 group = 0;
1344 else if (chnl < 9)
1345 group = 1;
1346 else
1347 group = 2;
1348 return group;
1349}
1350
1351static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, 1339static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1352 bool autoload_fail, 1340 bool autoload_fail,
1353 u8 *hwinfo) 1341 u8 *hwinfo)
@@ -1568,7 +1556,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1568 rtlefuse->eeprom_thermalmeter = (tempval & 0x1f); 1556 rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
1569 1557
1570 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail) 1558 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
1571 rtlefuse->b_apk_thermalmeterignore = true; 1559 rtlefuse->apk_thermalmeterignore = true;
1572 1560
1573 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; 1561 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
1574 RTPRINT(rtlpriv, FINIT, INIT_TxPower, 1562 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
@@ -1625,7 +1613,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1625 1613
1626 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 1614 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
1627 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 1615 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1628 rtlefuse->b_txpwr_fromeprom = true; 1616 rtlefuse->txpwr_fromeprom = true;
1629 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 1617 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
1630 1618
1631 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1619 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -1668,7 +1656,7 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
1668 1656
1669 switch (rtlhal->oem_id) { 1657 switch (rtlhal->oem_id) {
1670 case RT_CID_819x_HP: 1658 case RT_CID_819x_HP:
1671 pcipriv->ledctl.bled_opendrain = true; 1659 pcipriv->ledctl.led_opendrain = true;
1672 break; 1660 break;
1673 case RT_CID_819x_Lenovo: 1661 case RT_CID_819x_Lenovo:
1674 case RT_CID_DEFAULT: 1662 case RT_CID_DEFAULT:
@@ -1693,10 +1681,10 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
1693 1681
1694 rtlhal->version = _rtl92ce_read_chip_version(hw); 1682 rtlhal->version = _rtl92ce_read_chip_version(hw);
1695 if (get_rf_type(rtlphy) == RF_1T1R) 1683 if (get_rf_type(rtlphy) == RF_1T1R)
1696 rtlpriv->dm.brfpath_rxenable[0] = true; 1684 rtlpriv->dm.rfpath_rxenable[0] = true;
1697 else 1685 else
1698 rtlpriv->dm.brfpath_rxenable[0] = 1686 rtlpriv->dm.rfpath_rxenable[0] =
1699 rtlpriv->dm.brfpath_rxenable[1] = true; 1687 rtlpriv->dm.rfpath_rxenable[1] = true;
1700 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n", 1688 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
1701 rtlhal->version)); 1689 rtlhal->version));
1702 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); 1690 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
@@ -1725,18 +1713,18 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1725 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1713 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1726 1714
1727 u32 ratr_value = (u32) mac->basic_rates; 1715 u32 ratr_value = (u32) mac->basic_rates;
1728 u8 *p_mcsrate = mac->mcs; 1716 u8 *mcsrate = mac->mcs;
1729 u8 ratr_index = 0; 1717 u8 ratr_index = 0;
1730 u8 b_nmode = mac->ht_enable; 1718 u8 nmode = mac->ht_enable;
1731 u8 mimo_ps = 1; 1719 u8 mimo_ps = 1;
1732 u16 shortgi_rate; 1720 u16 shortgi_rate;
1733 u32 tmp_ratr_value; 1721 u32 tmp_ratr_value;
1734 u8 b_curtxbw_40mhz = mac->bw_40; 1722 u8 curtxbw_40mhz = mac->bw_40;
1735 u8 b_curshortgi_40mhz = mac->sgi_40; 1723 u8 curshortgi_40mhz = mac->sgi_40;
1736 u8 b_curshortgi_20mhz = mac->sgi_20; 1724 u8 curshortgi_20mhz = mac->sgi_20;
1737 enum wireless_mode wirelessmode = mac->mode; 1725 enum wireless_mode wirelessmode = mac->mode;
1738 1726
1739 ratr_value |= EF2BYTE((*(u16 *) (p_mcsrate))) << 12; 1727 ratr_value |= ((*(u16 *) (mcsrate))) << 12;
1740 1728
1741 switch (wirelessmode) { 1729 switch (wirelessmode) {
1742 case WIRELESS_MODE_B: 1730 case WIRELESS_MODE_B:
@@ -1750,7 +1738,7 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1750 break; 1738 break;
1751 case WIRELESS_MODE_N_24G: 1739 case WIRELESS_MODE_N_24G:
1752 case WIRELESS_MODE_N_5G: 1740 case WIRELESS_MODE_N_5G:
1753 b_nmode = 1; 1741 nmode = 1;
1754 if (mimo_ps == 0) { 1742 if (mimo_ps == 0) {
1755 ratr_value &= 0x0007F005; 1743 ratr_value &= 0x0007F005;
1756 } else { 1744 } else {
@@ -1776,9 +1764,8 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1776 1764
1777 ratr_value &= 0x0FFFFFFF; 1765 ratr_value &= 0x0FFFFFFF;
1778 1766
1779 if (b_nmode && ((b_curtxbw_40mhz && 1767 if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz &&
1780 b_curshortgi_40mhz) || (!b_curtxbw_40mhz && 1768 curshortgi_20mhz))) {
1781 b_curshortgi_20mhz))) {
1782 1769
1783 ratr_value |= 0x10000000; 1770 ratr_value |= 0x10000000;
1784 tmp_ratr_value = (ratr_value >> 12); 1771 tmp_ratr_value = (ratr_value >> 12);
@@ -1806,11 +1793,11 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1806 u32 ratr_bitmap = (u32) mac->basic_rates; 1793 u32 ratr_bitmap = (u32) mac->basic_rates;
1807 u8 *p_mcsrate = mac->mcs; 1794 u8 *p_mcsrate = mac->mcs;
1808 u8 ratr_index; 1795 u8 ratr_index;
1809 u8 b_curtxbw_40mhz = mac->bw_40; 1796 u8 curtxbw_40mhz = mac->bw_40;
1810 u8 b_curshortgi_40mhz = mac->sgi_40; 1797 u8 curshortgi_40mhz = mac->sgi_40;
1811 u8 b_curshortgi_20mhz = mac->sgi_20; 1798 u8 curshortgi_20mhz = mac->sgi_20;
1812 enum wireless_mode wirelessmode = mac->mode; 1799 enum wireless_mode wirelessmode = mac->mode;
1813 bool b_shortgi = false; 1800 bool shortgi = false;
1814 u8 rate_mask[5]; 1801 u8 rate_mask[5];
1815 u8 macid = 0; 1802 u8 macid = 0;
1816 u8 mimops = 1; 1803 u8 mimops = 1;
@@ -1852,7 +1839,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1852 } else { 1839 } else {
1853 if (rtlphy->rf_type == RF_1T2R || 1840 if (rtlphy->rf_type == RF_1T2R ||
1854 rtlphy->rf_type == RF_1T1R) { 1841 rtlphy->rf_type == RF_1T1R) {
1855 if (b_curtxbw_40mhz) { 1842 if (curtxbw_40mhz) {
1856 if (rssi_level == 1) 1843 if (rssi_level == 1)
1857 ratr_bitmap &= 0x000f0000; 1844 ratr_bitmap &= 0x000f0000;
1858 else if (rssi_level == 2) 1845 else if (rssi_level == 2)
@@ -1868,7 +1855,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1868 ratr_bitmap &= 0x000ff005; 1855 ratr_bitmap &= 0x000ff005;
1869 } 1856 }
1870 } else { 1857 } else {
1871 if (b_curtxbw_40mhz) { 1858 if (curtxbw_40mhz) {
1872 if (rssi_level == 1) 1859 if (rssi_level == 1)
1873 ratr_bitmap &= 0x0f0f0000; 1860 ratr_bitmap &= 0x0f0f0000;
1874 else if (rssi_level == 2) 1861 else if (rssi_level == 2)
@@ -1886,13 +1873,13 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1886 } 1873 }
1887 } 1874 }
1888 1875
1889 if ((b_curtxbw_40mhz && b_curshortgi_40mhz) || 1876 if ((curtxbw_40mhz && curshortgi_40mhz) ||
1890 (!b_curtxbw_40mhz && b_curshortgi_20mhz)) { 1877 (!curtxbw_40mhz && curshortgi_20mhz)) {
1891 1878
1892 if (macid == 0) 1879 if (macid == 0)
1893 b_shortgi = true; 1880 shortgi = true;
1894 else if (macid == 1) 1881 else if (macid == 1)
1895 b_shortgi = false; 1882 shortgi = false;
1896 } 1883 }
1897 break; 1884 break;
1898 default: 1885 default:
@@ -1906,9 +1893,9 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1906 } 1893 }
1907 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1894 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1908 ("ratr_bitmap :%x\n", ratr_bitmap)); 1895 ("ratr_bitmap :%x\n", ratr_bitmap));
1909 *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | 1896 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
1910 (ratr_index << 28)); 1897 (ratr_index << 28);
1911 rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80; 1898 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
1912 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, " 1899 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
1913 "ratr_val:%x, %x:%x:%x:%x:%x\n", 1900 "ratr_val:%x, %x:%x:%x:%x:%x\n",
1914 ratr_index, ratr_bitmap, 1901 ratr_index, ratr_bitmap,
@@ -1940,13 +1927,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
1940 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1927 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1941 enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; 1928 enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
1942 u8 u1tmp; 1929 u8 u1tmp;
1943 bool b_actuallyset = false; 1930 bool actuallyset = false;
1944 unsigned long flag; 1931 unsigned long flag;
1945 1932
1946 if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter)) 1933 if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
1947 return false; 1934 return false;
1948 1935
1949 if (ppsc->b_swrf_processing) 1936 if (ppsc->swrf_processing)
1950 return false; 1937 return false;
1951 1938
1952 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); 1939 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
@@ -1972,24 +1959,24 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
1972 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL); 1959 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
1973 e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF; 1960 e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
1974 1961
1975 if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) { 1962 if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
1976 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 1963 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
1977 ("GPIOChangeRF - HW Radio ON, RF ON\n")); 1964 ("GPIOChangeRF - HW Radio ON, RF ON\n"));
1978 1965
1979 e_rfpowerstate_toset = ERFON; 1966 e_rfpowerstate_toset = ERFON;
1980 ppsc->b_hwradiooff = false; 1967 ppsc->hwradiooff = false;
1981 b_actuallyset = true; 1968 actuallyset = true;
1982 } else if ((ppsc->b_hwradiooff == false) 1969 } else if ((ppsc->hwradiooff == false)
1983 && (e_rfpowerstate_toset == ERFOFF)) { 1970 && (e_rfpowerstate_toset == ERFOFF)) {
1984 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, 1971 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
1985 ("GPIOChangeRF - HW Radio OFF, RF OFF\n")); 1972 ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
1986 1973
1987 e_rfpowerstate_toset = ERFOFF; 1974 e_rfpowerstate_toset = ERFOFF;
1988 ppsc->b_hwradiooff = true; 1975 ppsc->hwradiooff = true;
1989 b_actuallyset = true; 1976 actuallyset = true;
1990 } 1977 }
1991 1978
1992 if (b_actuallyset) { 1979 if (actuallyset) {
1993 if (e_rfpowerstate_toset == ERFON) { 1980 if (e_rfpowerstate_toset == ERFON) {
1994 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && 1981 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
1995 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) { 1982 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
@@ -2028,7 +2015,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
2028 } 2015 }
2029 2016
2030 *valid = 1; 2017 *valid = 1;
2031 return !ppsc->b_hwradiooff; 2018 return !ppsc->hwradiooff;
2032 2019
2033} 2020}
2034 2021
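[editor's note] Both the HW_VAR_AC_PARAM branch and rtl92ce_set_qos() above build the EDCA register value by packing AIFS, ECWmin, ECWmax and the TXOP limit into one 32-bit word, now converting the mac80211 values with le16_to_cpu() first. A standalone sketch of that packing; the bit offsets below are assumptions for illustration (the driver takes AC_PARAM_ECW_MIN_OFFSET and friends from its register headers):

#include <stdint.h>

/* Assumed offsets, for illustration only. */
#define ECW_MIN_OFFSET	 8
#define ECW_MAX_OFFSET	12
#define TXOP_OFFSET	16

static uint32_t pack_ac_param(uint8_t aifs, uint16_t cw_min,
			      uint16_t cw_max, uint16_t tx_op)
{
	uint32_t v = aifs;				  /* AIFS in bits 7:0 */

	v |= ((uint32_t)cw_min & 0xF) << ECW_MIN_OFFSET;  /* ECWmin */
	v |= ((uint32_t)cw_max & 0xF) << ECW_MAX_OFFSET;  /* ECWmax */
	v |= (uint32_t)tx_op << TXOP_OFFSET;		  /* TXOP limit */
	return v;
}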
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
index 78a0569208e..7b1da8d7508 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -57,7 +57,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
57 ("switch case not process\n")); 57 ("switch case not process\n"));
58 break; 58 break;
59 } 59 }
60 pled->b_ledon = true; 60 pled->ledon = true;
61} 61}
62 62
63void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) 63void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
@@ -76,7 +76,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
76 break; 76 break;
77 case LED_PIN_LED0: 77 case LED_PIN_LED0:
78 ledcfg &= 0xf0; 78 ledcfg &= 0xf0;
79 if (pcipriv->ledctl.bled_opendrain == true) 79 if (pcipriv->ledctl.led_opendrain == true)
80 rtl_write_byte(rtlpriv, REG_LEDCFG2, 80 rtl_write_byte(rtlpriv, REG_LEDCFG2,
81 (ledcfg | BIT(1) | BIT(5) | BIT(6))); 81 (ledcfg | BIT(1) | BIT(5) | BIT(6)));
82 else 82 else
@@ -92,7 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
92 ("switch case not process\n")); 92 ("switch case not process\n"));
93 break; 93 break;
94 } 94 }
95 pled->b_ledon = false; 95 pled->ledon = false;
96} 96}
97 97
98void rtl92ce_init_sw_leds(struct ieee80211_hw *hw) 98void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 45044117139..191106033b3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -37,82 +37,7 @@
37#include "dm.h" 37#include "dm.h"
38#include "table.h" 38#include "table.h"
39 39
40static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, 40#include "../rtl8192c/phy_common.c"
41 enum radio_path rfpath, u32 offset);
42static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
43 enum radio_path rfpath, u32 offset,
44 u32 data);
45static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
46 enum radio_path rfpath, u32 offset);
47static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
48 enum radio_path rfpath, u32 offset,
49 u32 data);
50static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
51static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
52static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
53static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
54 u8 configtype);
55static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
56 u8 configtype);
57static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
58static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
59 u32 cmdtableidx, u32 cmdtablesz,
60 enum swchnlcmd_id cmdid, u32 para1,
61 u32 para2, u32 msdelay);
62static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
63 u8 channel, u8 *stage, u8 *step,
64 u32 *delay);
65static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
66 enum wireless_mode wirelessmode,
67 long power_indbm);
68static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
69 enum radio_path rfpath);
70static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
71 enum wireless_mode wirelessmode,
72 u8 txpwridx);
73u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
74{
75 struct rtl_priv *rtlpriv = rtl_priv(hw);
76 u32 returnvalue, originalvalue, bitshift;
77
78 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
79 "bitmask(%#x)\n", regaddr,
80 bitmask));
81 originalvalue = rtl_read_dword(rtlpriv, regaddr);
82 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
83 returnvalue = (originalvalue & bitmask) >> bitshift;
84
85 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
86 "Addr[0x%x]=0x%x\n", bitmask,
87 regaddr, originalvalue));
88
89 return returnvalue;
90
91}
92
93void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
94 u32 regaddr, u32 bitmask, u32 data)
95{
96 struct rtl_priv *rtlpriv = rtl_priv(hw);
97 u32 originalvalue, bitshift;
98
99 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
100 " data(%#x)\n", regaddr, bitmask,
101 data));
102
103 if (bitmask != MASKDWORD) {
104 originalvalue = rtl_read_dword(rtlpriv, regaddr);
105 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
106 data = ((originalvalue & (~bitmask)) | (data << bitshift));
107 }
108
109 rtl_write_dword(rtlpriv, regaddr, data);
110
111 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
112 " data(%#x)\n", regaddr, bitmask,
113 data));
114
115}
116 41
117u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, 42u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
118 enum radio_path rfpath, u32 regaddr, u32 bitmask) 43 enum radio_path rfpath, u32 regaddr, u32 bitmask)
@@ -197,118 +122,6 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
197 bitmask, data, rfpath)); 122 bitmask, data, rfpath));
198} 123}
199 124
200static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
201 enum radio_path rfpath, u32 offset)
202{
203 RT_ASSERT(false, ("deprecated!\n"));
204 return 0;
205}
206
207static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
208 enum radio_path rfpath, u32 offset,
209 u32 data)
210{
211 RT_ASSERT(false, ("deprecated!\n"));
212}
213
214static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
215 enum radio_path rfpath, u32 offset)
216{
217 struct rtl_priv *rtlpriv = rtl_priv(hw);
218 struct rtl_phy *rtlphy = &(rtlpriv->phy);
219 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
220 u32 newoffset;
221 u32 tmplong, tmplong2;
222 u8 rfpi_enable = 0;
223 u32 retvalue;
224
225 offset &= 0x3f;
226 newoffset = offset;
227 if (RT_CANNOT_IO(hw)) {
228 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
229 return 0xFFFFFFFF;
230 }
231 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
232 if (rfpath == RF90_PATH_A)
233 tmplong2 = tmplong;
234 else
235 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
236 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
237 (newoffset << 23) | BLSSIREADEDGE;
238 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
239 tmplong & (~BLSSIREADEDGE));
240 mdelay(1);
241 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
242 mdelay(1);
243 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
244 tmplong | BLSSIREADEDGE);
245 mdelay(1);
246 if (rfpath == RF90_PATH_A)
247 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
248 BIT(8));
249 else if (rfpath == RF90_PATH_B)
250 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
251 BIT(8));
252 if (rfpi_enable)
253 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
254 BLSSIREADBACKDATA);
255 else
256 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
257 BLSSIREADBACKDATA);
258 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
259 rfpath, pphyreg->rflssi_readback,
260 retvalue));
261 return retvalue;
262}
263
264static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
265 enum radio_path rfpath, u32 offset,
266 u32 data)
267{
268 u32 data_and_addr;
269 u32 newoffset;
270 struct rtl_priv *rtlpriv = rtl_priv(hw);
271 struct rtl_phy *rtlphy = &(rtlpriv->phy);
272 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
273
274 if (RT_CANNOT_IO(hw)) {
275 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
276 return;
277 }
278 offset &= 0x3f;
279 newoffset = offset;
280 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
281 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
282 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
283 rfpath, pphyreg->rf3wire_offset,
284 data_and_addr));
285}
286
287static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
288{
289 u32 i;
290
291 for (i = 0; i <= 31; i++) {
292 if (((bitmask >> i) & 0x1) == 1)
293 break;
294 }
295 return i;
296}
297
298static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
299{
300 rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
301 rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
302 rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
303 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
304 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
305 rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
306 rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
307 rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
308 rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
309 rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
310}
311
312bool rtl92c_phy_mac_config(struct ieee80211_hw *hw) 125bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
313{ 126{
314 struct rtl_priv *rtlpriv = rtl_priv(hw); 127 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -327,7 +140,7 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
327 struct rtl_priv *rtlpriv = rtl_priv(hw); 140 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 u16 regval; 141 u16 regval;
329 u32 regvaldw; 142 u32 regvaldw;
330 u8 b_reg_hwparafile = 1; 143 u8 reg_hwparafile = 1;
331 144
332 _rtl92c_phy_init_bb_rf_register_definition(hw); 145 _rtl92c_phy_init_bb_rf_register_definition(hw);
333 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); 146 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
@@ -342,55 +155,11 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
342 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); 155 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
343 regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0); 156 regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
344 rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23)); 157 rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
345 if (b_reg_hwparafile == 1) 158 if (reg_hwparafile == 1)
346 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw); 159 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
347 return rtstatus; 160 return rtstatus;
348} 161}
349 162
350bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
351{
352 return rtl92c_phy_rf6052_config(hw);
353}
354
355static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
356{
357 struct rtl_priv *rtlpriv = rtl_priv(hw);
358 struct rtl_phy *rtlphy = &(rtlpriv->phy);
359 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
360 bool rtstatus;
361
362 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
363 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
364 BASEBAND_CONFIG_PHY_REG);
365 if (rtstatus != true) {
366 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
367 return false;
368 }
369 if (rtlphy->rf_type == RF_1T2R) {
370 _rtl92c_phy_bb_config_1t(hw);
371 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
372 }
373 if (rtlefuse->autoload_failflag == false) {
374 rtlphy->pwrgroup_cnt = 0;
375 rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw,
376 BASEBAND_CONFIG_PHY_REG);
377 }
378 if (rtstatus != true) {
379 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
380 return false;
381 }
382 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
383 BASEBAND_CONFIG_AGC_TAB);
384 if (rtstatus != true) {
385 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
386 return false;
387 }
388 rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
389 RFPGA0_XA_HSSIPARAMETER2,
390 0x200));
391 return true;
392}
393
394static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) 163static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
395{ 164{
396 struct rtl_priv *rtlpriv = rtl_priv(hw); 165 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -408,10 +177,6 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
408 return true; 177 return true;
409} 178}
410 179
411void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw)
412{
413}
414
415static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, 180static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
416 u8 configtype) 181 u8 configtype)
417{ 182{
@@ -472,174 +237,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
472 return true; 237 return true;
473} 238}
474 239
475static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
476 u32 regaddr, u32 bitmask,
477 u32 data)
478{
479 struct rtl_priv *rtlpriv = rtl_priv(hw);
480 struct rtl_phy *rtlphy = &(rtlpriv->phy);
481
482 if (regaddr == RTXAGC_A_RATE18_06) {
483 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
484 data;
485 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
486 ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
487 rtlphy->pwrgroup_cnt,
488 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
489 pwrgroup_cnt][0]));
490 }
491 if (regaddr == RTXAGC_A_RATE54_24) {
492 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
493 data;
494 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
495 ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
496 rtlphy->pwrgroup_cnt,
497 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
498 pwrgroup_cnt][1]));
499 }
500 if (regaddr == RTXAGC_A_CCK1_MCS32) {
501 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
502 data;
503 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
504 ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
505 rtlphy->pwrgroup_cnt,
506 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
507 pwrgroup_cnt][6]));
508 }
509 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
510 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
511 data;
512 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
513 ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
514 rtlphy->pwrgroup_cnt,
515 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
516 pwrgroup_cnt][7]));
517 }
518 if (regaddr == RTXAGC_A_MCS03_MCS00) {
519 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
520 data;
521 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
522 ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
523 rtlphy->pwrgroup_cnt,
524 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
525 pwrgroup_cnt][2]));
526 }
527 if (regaddr == RTXAGC_A_MCS07_MCS04) {
528 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
529 data;
530 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
531 ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
532 rtlphy->pwrgroup_cnt,
533 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
534 pwrgroup_cnt][3]));
535 }
536 if (regaddr == RTXAGC_A_MCS11_MCS08) {
537 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
538 data;
539 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
540 ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
541 rtlphy->pwrgroup_cnt,
542 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
543 pwrgroup_cnt][4]));
544 }
545 if (regaddr == RTXAGC_A_MCS15_MCS12) {
546 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
547 data;
548 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
549 ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
550 rtlphy->pwrgroup_cnt,
551 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
552 pwrgroup_cnt][5]));
553 }
554 if (regaddr == RTXAGC_B_RATE18_06) {
555 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
556 data;
557 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
558 ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
559 rtlphy->pwrgroup_cnt,
560 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
561 pwrgroup_cnt][8]));
562 }
563 if (regaddr == RTXAGC_B_RATE54_24) {
564 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
565 data;
566
567 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
568 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
569 rtlphy->pwrgroup_cnt,
570 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
571 pwrgroup_cnt][9]));
572 }
573
574 if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
575 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
576 data;
577
578 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
579 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
580 rtlphy->pwrgroup_cnt,
581 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
582 pwrgroup_cnt][14]));
583 }
584
585 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
586 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
587 data;
588
589 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
590 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
591 rtlphy->pwrgroup_cnt,
592 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
593 pwrgroup_cnt][15]));
594 }
595
596 if (regaddr == RTXAGC_B_MCS03_MCS00) {
597 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
598 data;
599
600 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
601 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
602 rtlphy->pwrgroup_cnt,
603 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
604 pwrgroup_cnt][10]));
605 }
606
607 if (regaddr == RTXAGC_B_MCS07_MCS04) {
608 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
609 data;
610
611 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
612 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
613 rtlphy->pwrgroup_cnt,
614 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
615 pwrgroup_cnt][11]));
616 }
617
618 if (regaddr == RTXAGC_B_MCS11_MCS08) {
619 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
620 data;
621
622 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
623 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
624 rtlphy->pwrgroup_cnt,
625 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
626 pwrgroup_cnt][12]));
627 }
628
629 if (regaddr == RTXAGC_B_MCS15_MCS12) {
630 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
631 data;
632
633 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
634 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
635 rtlphy->pwrgroup_cnt,
636 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
637 pwrgroup_cnt][13]));
638
639 rtlphy->pwrgroup_cnt++;
640 }
641}
642
643static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, 240static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
644 u8 configtype) 241 u8 configtype)
645{ 242{
@@ -679,12 +276,6 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
679 return true; 276 return true;
680} 277}
681 278
682static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
683 enum radio_path rfpath)
684{
685 return true;
686}
687
688bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, 279bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
689 enum radio_path rfpath) 280 enum radio_path rfpath)
690{ 281{
@@ -776,345 +367,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
776 return true; 367 return true;
777} 368}
778 369
779void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
780{
781 struct rtl_priv *rtlpriv = rtl_priv(hw);
782 struct rtl_phy *rtlphy = &(rtlpriv->phy);
783
784 rtlphy->default_initialgain[0] =
785 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
786 rtlphy->default_initialgain[1] =
787 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
788 rtlphy->default_initialgain[2] =
789 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
790 rtlphy->default_initialgain[3] =
791 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
792
793 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
794 ("Default initial gain (c50=0x%x, "
795 "c58=0x%x, c60=0x%x, c68=0x%x\n",
796 rtlphy->default_initialgain[0],
797 rtlphy->default_initialgain[1],
798 rtlphy->default_initialgain[2],
799 rtlphy->default_initialgain[3]));
800
801 rtlphy->framesync = (u8) rtl_get_bbreg(hw,
802 ROFDM0_RXDETECTOR3, MASKBYTE0);
803 rtlphy->framesync_c34 = rtl_get_bbreg(hw,
804 ROFDM0_RXDETECTOR2, MASKDWORD);
805
806 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
807 ("Default framesync (0x%x) = 0x%x\n",
808 ROFDM0_RXDETECTOR3, rtlphy->framesync));
809}
810
811static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
812{
813 struct rtl_priv *rtlpriv = rtl_priv(hw);
814 struct rtl_phy *rtlphy = &(rtlpriv->phy);
815
816 rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
817 rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
818 rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
819 rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
820
821 rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
822 rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
823 rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
824 rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
825
826 rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
827 rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
828
829 rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
830 rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
831
832 rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
833 RFPGA0_XA_LSSIPARAMETER;
834 rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
835 RFPGA0_XB_LSSIPARAMETER;
836
837 rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
838 rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
839 rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
840 rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
841
842 rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
843 rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
844 rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
845 rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
846
847 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
848 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
849
850 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
851 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
852
853 rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
854 RFPGA0_XAB_SWITCHCONTROL;
855 rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
856 RFPGA0_XAB_SWITCHCONTROL;
857 rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
858 RFPGA0_XCD_SWITCHCONTROL;
859 rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
860 RFPGA0_XCD_SWITCHCONTROL;
861
862 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
863 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
864 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
865 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
866
867 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
868 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
869 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
870 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
871
872 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
873 ROFDM0_XARXIQIMBALANCE;
874 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
875 ROFDM0_XBRXIQIMBALANCE;
876 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
877 ROFDM0_XCRXIQIMBANLANCE;
878 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
879 ROFDM0_XDRXIQIMBALANCE;
880
881 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
882 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
883 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
884 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
885
886 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
887 ROFDM0_XATXIQIMBALANCE;
888 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
889 ROFDM0_XBTXIQIMBALANCE;
890 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
891 ROFDM0_XCTXIQIMBALANCE;
892 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
893 ROFDM0_XDTXIQIMBALANCE;
894
895 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
896 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
897 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
898 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
899
900 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
901 RFPGA0_XA_LSSIREADBACK;
902 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
903 RFPGA0_XB_LSSIREADBACK;
904 rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
905 RFPGA0_XC_LSSIREADBACK;
906 rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
907 RFPGA0_XD_LSSIREADBACK;
908
909 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
910 TRANSCEIVEA_HSPI_READBACK;
911 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
912 TRANSCEIVEB_HSPI_READBACK;
913
914}
915
916void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
917{
918 struct rtl_priv *rtlpriv = rtl_priv(hw);
919 struct rtl_phy *rtlphy = &(rtlpriv->phy);
920 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
921 u8 txpwr_level;
922 long txpwr_dbm;
923
924 txpwr_level = rtlphy->cur_cck_txpwridx;
925 txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
926 WIRELESS_MODE_B, txpwr_level);
927 txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
928 rtlefuse->legacy_ht_txpowerdiff;
929 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
930 WIRELESS_MODE_G,
931 txpwr_level) > txpwr_dbm)
932 txpwr_dbm =
933 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
934 txpwr_level);
935 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
936 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
937 WIRELESS_MODE_N_24G,
938 txpwr_level) > txpwr_dbm)
939 txpwr_dbm =
940 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
941 txpwr_level);
942 *powerlevel = txpwr_dbm;
943}
944
945static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
946 u8 *cckpowerlevel, u8 *ofdmpowerlevel)
947{
948 struct rtl_priv *rtlpriv = rtl_priv(hw);
949 struct rtl_phy *rtlphy = &(rtlpriv->phy);
950 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
951 u8 index = (channel - 1);
952
953 cckpowerlevel[RF90_PATH_A] =
954 rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
955 cckpowerlevel[RF90_PATH_B] =
956 rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
957 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
958 ofdmpowerlevel[RF90_PATH_A] =
959 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
960 ofdmpowerlevel[RF90_PATH_B] =
961 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
962 } else if (get_rf_type(rtlphy) == RF_2T2R) {
963 ofdmpowerlevel[RF90_PATH_A] =
964 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
965 ofdmpowerlevel[RF90_PATH_B] =
966 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
967 }
968}
969
970static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
971 u8 channel, u8 *cckpowerlevel,
972 u8 *ofdmpowerlevel)
973{
974 struct rtl_priv *rtlpriv = rtl_priv(hw);
975 struct rtl_phy *rtlphy = &(rtlpriv->phy);
976
977 rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
978 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
979}
980
981void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
982{
983 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
984 u8 cckpowerlevel[2], ofdmpowerlevel[2];
985
986 if (rtlefuse->b_txpwr_fromeprom == false)
987 return;
988 _rtl92c_get_txpower_index(hw, channel,
989 &cckpowerlevel[0], &ofdmpowerlevel[0]);
990 _rtl92c_ccxpower_index_check(hw,
991 channel, &cckpowerlevel[0],
992 &ofdmpowerlevel[0]);
993 rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
994 rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
995}
996
997bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
998{
999 struct rtl_priv *rtlpriv = rtl_priv(hw);
1000 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1001 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1002 u8 idx;
1003 u8 rf_path;
1004
1005 u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
1006 WIRELESS_MODE_B,
1007 power_indbm);
1008 u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
1009 WIRELESS_MODE_N_24G,
1010 power_indbm);
1011 if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
1012 ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
1013 else
1014 ofdmtxpwridx = 0;
1015 RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
1016 ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
1017 power_indbm, ccktxpwridx, ofdmtxpwridx));
1018 for (idx = 0; idx < 14; idx++) {
1019 for (rf_path = 0; rf_path < 2; rf_path++) {
1020 rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
1021 rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
1022 ofdmtxpwridx;
1023 rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
1024 ofdmtxpwridx;
1025 }
1026 }
1027 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
1028 return true;
1029}
1030
1031void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
1032{
1033}
1034
1035static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
1036 enum wireless_mode wirelessmode,
1037 long power_indbm)
1038{
1039 u8 txpwridx;
1040 long offset;
1041
1042 switch (wirelessmode) {
1043 case WIRELESS_MODE_B:
1044 offset = -7;
1045 break;
1046 case WIRELESS_MODE_G:
1047 case WIRELESS_MODE_N_24G:
1048 offset = -8;
1049 break;
1050 default:
1051 offset = -8;
1052 break;
1053 }
1054
1055 if ((power_indbm - offset) > 0)
1056 txpwridx = (u8) ((power_indbm - offset) * 2);
1057 else
1058 txpwridx = 0;
1059
1060 if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
1061 txpwridx = MAX_TXPWR_IDX_NMODE_92S;
1062
1063 return txpwridx;
1064}
1065
1066static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
1067 enum wireless_mode wirelessmode,
1068 u8 txpwridx)
1069{
1070 long offset;
1071 long pwrout_dbm;
1072
1073 switch (wirelessmode) {
1074 case WIRELESS_MODE_B:
1075 offset = -7;
1076 break;
1077 case WIRELESS_MODE_G:
1078 case WIRELESS_MODE_N_24G:
1079 offset = -8;
1080 break;
1081 default:
1082 offset = -8;
1083 break;
1084 }
1085 pwrout_dbm = txpwridx / 2 + offset;
1086 return pwrout_dbm;
1087}
1088
1089void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1090{
1091 struct rtl_priv *rtlpriv = rtl_priv(hw);
1092 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1093 enum io_type iotype;
1094
1095 if (!is_hal_stop(rtlhal)) {
1096 switch (operation) {
1097 case SCAN_OPT_BACKUP:
1098 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1099 rtlpriv->cfg->ops->set_hw_reg(hw,
1100 HW_VAR_IO_CMD,
1101 (u8 *)&iotype);
1102
1103 break;
1104 case SCAN_OPT_RESTORE:
1105 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1106 rtlpriv->cfg->ops->set_hw_reg(hw,
1107 HW_VAR_IO_CMD,
1108 (u8 *)&iotype);
1109 break;
1110 default:
1111 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1112 ("Unknown Scan Backup operation.\n"));
1113 break;
1114 }
1115 }
1116}
1117
1118 370 void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
1119 371 {
1120 372 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1183,644 +435,6 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
1183 435 	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
1184 436 }
1185 437
1186void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
1187 enum nl80211_channel_type ch_type)
1188{
1189 struct rtl_priv *rtlpriv = rtl_priv(hw);
1190 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1192 u8 tmp_bw = rtlphy->current_chan_bw;
1193
1194 if (rtlphy->set_bwmode_inprogress)
1195 return;
1196 rtlphy->set_bwmode_inprogress = true;
1197 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
1198 rtl92c_phy_set_bw_mode_callback(hw);
1199 else {
1200 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1201 ("FALSE driver sleep or unload\n"));
1202 rtlphy->set_bwmode_inprogress = false;
1203 rtlphy->current_chan_bw = tmp_bw;
1204 }
1205}
1206
1207void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
1208{
1209 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1211 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1212 u32 delay;
1213
1214 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
1215 ("switch to channel%d\n", rtlphy->current_channel));
1216 if (is_hal_stop(rtlhal))
1217 return;
1218 do {
1219 if (!rtlphy->sw_chnl_inprogress)
1220 break;
1221 if (!_rtl92c_phy_sw_chnl_step_by_step
1222 (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
1223 &rtlphy->sw_chnl_step, &delay)) {
1224 if (delay > 0)
1225 mdelay(delay);
1226 else
1227 continue;
1228 } else
1229 rtlphy->sw_chnl_inprogress = false;
1230 break;
1231 } while (true);
1232 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
1233}
1234
1235u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
1236{
1237 struct rtl_priv *rtlpriv = rtl_priv(hw);
1238 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1239 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1240
1241 if (rtlphy->sw_chnl_inprogress)
1242 return 0;
1243 if (rtlphy->set_bwmode_inprogress)
1244 return 0;
1245 RT_ASSERT((rtlphy->current_channel <= 14),
1246 ("WIRELESS_MODE_G but channel>14"));
1247 rtlphy->sw_chnl_inprogress = true;
1248 rtlphy->sw_chnl_stage = 0;
1249 rtlphy->sw_chnl_step = 0;
1250 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1251 rtl92c_phy_sw_chnl_callback(hw);
1252 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1253 ("sw_chnl_inprogress false schdule workitem\n"));
1254 rtlphy->sw_chnl_inprogress = false;
1255 } else {
1256 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1257 ("sw_chnl_inprogress false driver sleep or"
1258 " unload\n"));
1259 rtlphy->sw_chnl_inprogress = false;
1260 }
1261 return 1;
1262}
1263
1264static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
1265 u8 channel, u8 *stage, u8 *step,
1266 u32 *delay)
1267{
1268 struct rtl_priv *rtlpriv = rtl_priv(hw);
1269 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1270 struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
1271 u32 precommoncmdcnt;
1272 struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
1273 u32 postcommoncmdcnt;
1274 struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
1275 u32 rfdependcmdcnt;
1276 struct swchnlcmd *currentcmd = NULL;
1277 u8 rfpath;
1278 u8 num_total_rfpath = rtlphy->num_total_rfpath;
1279
1280 precommoncmdcnt = 0;
1281 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1282 MAX_PRECMD_CNT,
1283 CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
1284 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1285 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
1286
1287 postcommoncmdcnt = 0;
1288
1289 _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
1290 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
1291
1292 rfdependcmdcnt = 0;
1293
1294 RT_ASSERT((channel >= 1 && channel <= 14),
1295 ("illegal channel for Zebra: %d\n", channel));
1296
1297 _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1298 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
1299 RF_CHNLBW, channel, 10);
1300
1301 _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1302 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
1303 0);
1304
1305 do {
1306 switch (*stage) {
1307 case 0:
1308 currentcmd = &precommoncmd[*step];
1309 break;
1310 case 1:
1311 currentcmd = &rfdependcmd[*step];
1312 break;
1313 case 2:
1314 currentcmd = &postcommoncmd[*step];
1315 break;
1316 }
1317
1318 if (currentcmd->cmdid == CMDID_END) {
1319 if ((*stage) == 2) {
1320 return true;
1321 } else {
1322 (*stage)++;
1323 (*step) = 0;
1324 continue;
1325 }
1326 }
1327
1328 switch (currentcmd->cmdid) {
1329 case CMDID_SET_TXPOWEROWER_LEVEL:
1330 rtl92c_phy_set_txpower_level(hw, channel);
1331 break;
1332 case CMDID_WRITEPORT_ULONG:
1333 rtl_write_dword(rtlpriv, currentcmd->para1,
1334 currentcmd->para2);
1335 break;
1336 case CMDID_WRITEPORT_USHORT:
1337 rtl_write_word(rtlpriv, currentcmd->para1,
1338 (u16) currentcmd->para2);
1339 break;
1340 case CMDID_WRITEPORT_UCHAR:
1341 rtl_write_byte(rtlpriv, currentcmd->para1,
1342 (u8) currentcmd->para2);
1343 break;
1344 case CMDID_RF_WRITEREG:
1345 for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
1346 rtlphy->rfreg_chnlval[rfpath] =
1347 ((rtlphy->rfreg_chnlval[rfpath] &
1348 0xfffffc00) | currentcmd->para2);
1349
1350 rtl_set_rfreg(hw, (enum radio_path)rfpath,
1351 currentcmd->para1,
1352 RFREG_OFFSET_MASK,
1353 rtlphy->rfreg_chnlval[rfpath]);
1354 }
1355 break;
1356 default:
1357 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1358 ("switch case not process\n"));
1359 break;
1360 }
1361
1362 break;
1363 } while (true);
1364
1365 (*delay) = currentcmd->msdelay;
1366 (*step)++;
1367 return false;
1368}
1369
1370static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
1371 u32 cmdtableidx, u32 cmdtablesz,
1372 enum swchnlcmd_id cmdid,
1373 u32 para1, u32 para2, u32 msdelay)
1374{
1375 struct swchnlcmd *pcmd;
1376
1377 if (cmdtable == NULL) {
1378 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
1379 return false;
1380 }
1381
1382 if (cmdtableidx >= cmdtablesz)
1383 return false;
1384
1385 pcmd = cmdtable + cmdtableidx;
1386 pcmd->cmdid = cmdid;
1387 pcmd->para1 = para1;
1388 pcmd->para2 = para2;
1389 pcmd->msdelay = msdelay;
1390 return true;
1391}
1392
1393bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
1394{
1395 return true;
1396}
1397
1398static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
1399{
1400 u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
1401 u8 result = 0x00;
1402
1403 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
1404 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
1405 rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
1406 rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
1407 config_pathb ? 0x28160202 : 0x28160502);
1408
1409 if (config_pathb) {
1410 rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
1411 rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
1412 rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
1413 rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
1414 }
1415
1416 rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
1417 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
1418 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
1419
1420 mdelay(IQK_DELAY_TIME);
1421
1422 reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1423 reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
1424 reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
1425 reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
1426
1427 if (!(reg_eac & BIT(28)) &&
1428 (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
1429 (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
1430 result |= 0x01;
1431 else
1432 return result;
1433
1434 if (!(reg_eac & BIT(27)) &&
1435 (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
1436 (((reg_eac & 0x03FF0000) >> 16) != 0x36))
1437 result |= 0x02;
1438 return result;
1439}
1440
1441static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
1442{
1443 u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
1444 u8 result = 0x00;
1445
1446 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
1447 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
1448 mdelay(IQK_DELAY_TIME);
1449 reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1450 reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
1451 reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
1452 reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
1453 reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
1454 if (!(reg_eac & BIT(31)) &&
1455 (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
1456 (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
1457 result |= 0x01;
1458 else
1459 return result;
1460
1461 if (!(reg_eac & BIT(30)) &&
1462 (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
1463 (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
1464 result |= 0x02;
1465 return result;
1466}
1467
1468static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
1469 bool b_iqk_ok, long result[][8],
1470 u8 final_candidate, bool btxonly)
1471{
1472 u32 oldval_0, x, tx0_a, reg;
1473 long y, tx0_c;
1474
1475 if (final_candidate == 0xFF)
1476 return;
1477 else if (b_iqk_ok) {
1478 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1479 MASKDWORD) >> 22) & 0x3FF;
1480 x = result[final_candidate][0];
1481 if ((x & 0x00000200) != 0)
1482 x = x | 0xFFFFFC00;
1483 tx0_a = (x * oldval_0) >> 8;
1484 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
1485 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
1486 ((x * oldval_0 >> 7) & 0x1));
1487 y = result[final_candidate][1];
1488 if ((y & 0x00000200) != 0)
1489 y = y | 0xFFFFFC00;
1490 tx0_c = (y * oldval_0) >> 8;
1491 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
1492 ((tx0_c & 0x3C0) >> 6));
1493 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
1494 (tx0_c & 0x3F));
1495 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
1496 ((y * oldval_0 >> 7) & 0x1));
1497 if (btxonly)
1498 return;
1499 reg = result[final_candidate][2];
1500 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
1501 reg = result[final_candidate][3] & 0x3F;
1502 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
1503 reg = (result[final_candidate][3] >> 6) & 0xF;
1504 rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
1505 }
1506}
1507
1508static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
1509 bool b_iqk_ok, long result[][8],
1510 u8 final_candidate, bool btxonly)
1511{
1512 u32 oldval_1, x, tx1_a, reg;
1513 long y, tx1_c;
1514
1515 if (final_candidate == 0xFF)
1516 return;
1517 else if (b_iqk_ok) {
1518 oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
1519 MASKDWORD) >> 22) & 0x3FF;
1520 x = result[final_candidate][4];
1521 if ((x & 0x00000200) != 0)
1522 x = x | 0xFFFFFC00;
1523 tx1_a = (x * oldval_1) >> 8;
1524 rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
1525 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
1526 ((x * oldval_1 >> 7) & 0x1));
1527 y = result[final_candidate][5];
1528 if ((y & 0x00000200) != 0)
1529 y = y | 0xFFFFFC00;
1530 tx1_c = (y * oldval_1) >> 8;
1531 rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
1532 ((tx1_c & 0x3C0) >> 6));
1533 rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
1534 (tx1_c & 0x3F));
1535 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
1536 ((y * oldval_1 >> 7) & 0x1));
1537 if (btxonly)
1538 return;
1539 reg = result[final_candidate][6];
1540 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
1541 reg = result[final_candidate][7] & 0x3F;
1542 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
1543 reg = (result[final_candidate][7] >> 6) & 0xF;
1544 rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
1545 }
1546}
1547
1548static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
1549 u32 *addareg, u32 *addabackup,
1550 u32 registernum)
1551{
1552 u32 i;
1553
1554 for (i = 0; i < registernum; i++)
1555 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1556}
1557
1558static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
1559 u32 *macreg, u32 *macbackup)
1560{
1561 struct rtl_priv *rtlpriv = rtl_priv(hw);
1562 u32 i;
1563
1564 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1565 macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
1566 macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
1567}
1568
1569static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
1570 u32 *addareg, u32 *addabackup,
1571 u32 regiesternum)
1572{
1573 u32 i;
1574
1575 for (i = 0; i < regiesternum; i++)
1576 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
1577}
1578
1579static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
1580 u32 *macreg, u32 *macbackup)
1581{
1582 struct rtl_priv *rtlpriv = rtl_priv(hw);
1583 u32 i;
1584
1585 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1586 rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
1587 rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
1588}
1589
1590static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
1591 u32 *addareg, bool is_patha_on, bool is2t)
1592{
1593 u32 pathOn;
1594 u32 i;
1595
1596 pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
1597 if (false == is2t) {
1598 pathOn = 0x0bdb25a0;
1599 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1600 } else {
1601 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
1602 }
1603
1604 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1605 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
1606}
1607
1608static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
1609 u32 *macreg, u32 *macbackup)
1610{
1611 struct rtl_priv *rtlpriv = rtl_priv(hw);
1612 u32 i;
1613
1614 rtl_write_byte(rtlpriv, macreg[0], 0x3F);
1615
1616 for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
1617 rtl_write_byte(rtlpriv, macreg[i],
1618 (u8) (macbackup[i] & (~BIT(3))));
1619 rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
1620}
1621
1622static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
1623{
1624 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
1625 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1626 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1627}
1628
1629static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1630{
1631 u32 mode;
1632
1633 mode = pi_mode ? 0x01000100 : 0x01000000;
1634 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1635 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1636}
1637
1638static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
1639 long result[][8], u8 c1, u8 c2)
1640{
1641 u32 i, j, diff, simularity_bitmap, bound;
1642 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1643
1644 u8 final_candidate[2] = { 0xFF, 0xFF };
1645 bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
1646
1647 if (is2t)
1648 bound = 8;
1649 else
1650 bound = 4;
1651
1652 simularity_bitmap = 0;
1653
1654 for (i = 0; i < bound; i++) {
1655 diff = (result[c1][i] > result[c2][i]) ?
1656 (result[c1][i] - result[c2][i]) :
1657 (result[c2][i] - result[c1][i]);
1658
1659 if (diff > MAX_TOLERANCE) {
1660 if ((i == 2 || i == 6) && !simularity_bitmap) {
1661 if (result[c1][i] + result[c1][i + 1] == 0)
1662 final_candidate[(i / 4)] = c2;
1663 else if (result[c2][i] + result[c2][i + 1] == 0)
1664 final_candidate[(i / 4)] = c1;
1665 else
1666 simularity_bitmap = simularity_bitmap |
1667 (1 << i);
1668 } else
1669 simularity_bitmap =
1670 simularity_bitmap | (1 << i);
1671 }
1672 }
1673
1674 if (simularity_bitmap == 0) {
1675 for (i = 0; i < (bound / 4); i++) {
1676 if (final_candidate[i] != 0xFF) {
1677 for (j = i * 4; j < (i + 1) * 4 - 2; j++)
1678 result[3][j] =
1679 result[final_candidate[i]][j];
1680 bresult = false;
1681 }
1682 }
1683 return bresult;
1684 } else if (!(simularity_bitmap & 0x0F)) {
1685 for (i = 0; i < 4; i++)
1686 result[3][i] = result[c1][i];
1687 return false;
1688 } else if (!(simularity_bitmap & 0xF0) && is2t) {
1689 for (i = 4; i < 8; i++)
1690 result[3][i] = result[c1][i];
1691 return false;
1692 } else {
1693 return false;
1694 }
1695
1696}
1697
1698static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1699 long result[][8], u8 t, bool is2t)
1700{
1701 struct rtl_priv *rtlpriv = rtl_priv(hw);
1702 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1703 u32 i;
1704 u8 patha_ok, pathb_ok;
1705 u32 adda_reg[IQK_ADDA_REG_NUM] = {
1706 0x85c, 0xe6c, 0xe70, 0xe74,
1707 0xe78, 0xe7c, 0xe80, 0xe84,
1708 0xe88, 0xe8c, 0xed0, 0xed4,
1709 0xed8, 0xedc, 0xee0, 0xeec
1710 };
1711
1712 u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
1713 0x522, 0x550, 0x551, 0x040
1714 };
1715
1716 const u32 retrycount = 2;
1717
1718 u32 bbvalue;
1719
1720 if (t == 0) {
1721 bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
1722
1723 _rtl92c_phy_save_adda_registers(hw, adda_reg,
1724 rtlphy->adda_backup, 16);
1725 _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
1726 rtlphy->iqk_mac_backup);
1727 }
1728 _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
1729 if (t == 0) {
1730 rtlphy->b_rfpi_enable = (u8) rtl_get_bbreg(hw,
1731 RFPGA0_XA_HSSIPARAMETER1,
1732 BIT(8));
1733 }
1734 if (!rtlphy->b_rfpi_enable)
1735 _rtl92c_phy_pi_mode_switch(hw, true);
1736 if (t == 0) {
1737 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
1738 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
1739 rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
1740 }
1741 rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
1742 rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
1743 rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
1744 if (is2t) {
1745 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1746 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1747 }
1748 _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
1749 rtlphy->iqk_mac_backup);
1750 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
1751 if (is2t)
1752 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
1753 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1754 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1755 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
1756 for (i = 0; i < retrycount; i++) {
1757 patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
1758 if (patha_ok == 0x03) {
1759 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1760 0x3FF0000) >> 16;
1761 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1762 0x3FF0000) >> 16;
1763 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1764 0x3FF0000) >> 16;
1765 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1766 0x3FF0000) >> 16;
1767 break;
1768 } else if (i == (retrycount - 1) && patha_ok == 0x01)
1769 result[t][0] = (rtl_get_bbreg(hw, 0xe94,
1770 MASKDWORD) & 0x3FF0000) >>
1771 16;
1772 result[t][1] =
1773 (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
1774
1775 }
1776
1777 if (is2t) {
1778 _rtl92c_phy_path_a_standby(hw);
1779 _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
1780 for (i = 0; i < retrycount; i++) {
1781 pathb_ok = _rtl92c_phy_path_b_iqk(hw);
1782 if (pathb_ok == 0x03) {
1783 result[t][4] = (rtl_get_bbreg(hw,
1784 0xeb4,
1785 MASKDWORD) &
1786 0x3FF0000) >> 16;
1787 result[t][5] =
1788 (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1789 0x3FF0000) >> 16;
1790 result[t][6] =
1791 (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
1792 0x3FF0000) >> 16;
1793 result[t][7] =
1794 (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
1795 0x3FF0000) >> 16;
1796 break;
1797 } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1798 result[t][4] = (rtl_get_bbreg(hw,
1799 0xeb4,
1800 MASKDWORD) &
1801 0x3FF0000) >> 16;
1802 }
1803 result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1804 0x3FF0000) >> 16;
1805 }
1806 }
1807 rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
1808 rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
1809 rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
1810 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1811 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
1812 if (is2t)
1813 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1814 if (t != 0) {
1815 if (!rtlphy->b_rfpi_enable)
1816 _rtl92c_phy_pi_mode_switch(hw, false);
1817 _rtl92c_phy_reload_adda_registers(hw, adda_reg,
1818 rtlphy->adda_backup, 16);
1819 _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
1820 rtlphy->iqk_mac_backup);
1821 }
1822}
1823
1824 438 static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
1825 439 {
1826 440 	u8 tmpreg;
@@ -1866,666 +480,6 @@ static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
1866 480 	}
1867 481 }
1868 482
1869static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1870 char delta, bool is2t)
1871{
1872 /* This routine is deliberately dummied out for later fixes */
1873#if 0
1874 struct rtl_priv *rtlpriv = rtl_priv(hw);
1875 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1876 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1877
1878 u32 reg_d[PATH_NUM];
1879 u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
1880
1881 u32 bb_backup[APK_BB_REG_NUM];
1882 u32 bb_reg[APK_BB_REG_NUM] = {
1883 0x904, 0xc04, 0x800, 0xc08, 0x874
1884 };
1885 u32 bb_ap_mode[APK_BB_REG_NUM] = {
1886 0x00000020, 0x00a05430, 0x02040000,
1887 0x000800e4, 0x00204000
1888 };
1889 u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
1890 0x00000020, 0x00a05430, 0x02040000,
1891 0x000800e4, 0x22204000
1892 };
1893
1894 u32 afe_backup[APK_AFE_REG_NUM];
1895 u32 afe_reg[APK_AFE_REG_NUM] = {
1896 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
1897 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
1898 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
1899 0xeec
1900 };
1901
1902 u32 mac_backup[IQK_MAC_REG_NUM];
1903 u32 mac_reg[IQK_MAC_REG_NUM] = {
1904 0x522, 0x550, 0x551, 0x040
1905 };
1906
1907 u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1908 {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
1909 {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
1910 };
1911
1912 u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1913 {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
1914 {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
1915 };
1916
1917 u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1918 {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
1919 {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
1920 };
1921
1922 u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1923 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
1924 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
1925 };
1926
1927 u32 afe_on_off[PATH_NUM] = {
1928 0x04db25a4, 0x0b1b25a4
1929 };
1930
1931 u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
1932
1933 u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
1934
1935 u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
1936
1937 u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
1938
1939 const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
1940 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1941 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1942 {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1943 {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1944 {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
1945 };
1946
1947 const u32 apk_normal_setting_value_1[13] = {
1948 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
1949 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
1950 0x12680000, 0x00880000, 0x00880000
1951 };
1952
1953 const u32 apk_normal_setting_value_2[16] = {
1954 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
1955 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
1956 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
1957 0x00050006
1958 };
1959
1960 const u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
1961
1962 long bb_offset, delta_v, delta_offset;
1963
1964 if (!is2t)
1965 pathbound = 1;
1966
1967 for (index = 0; index < PATH_NUM; index++) {
1968 apk_offset[index] = apk_normal_offset[index];
1969 apk_value[index] = apk_normal_value[index];
1970 afe_on_off[index] = 0x6fdb25a4;
1971 }
1972
1973 for (index = 0; index < APK_BB_REG_NUM; index++) {
1974 for (path = 0; path < pathbound; path++) {
1975 apk_rf_init_value[path][index] =
1976 apk_normal_rf_init_value[path][index];
1977 apk_rf_value_0[path][index] =
1978 apk_normal_rf_value_0[path][index];
1979 }
1980 bb_ap_mode[index] = bb_normal_ap_mode[index];
1981
1982 apkbound = 6;
1983 }
1984
1985 for (index = 0; index < APK_BB_REG_NUM; index++) {
1986 if (index == 0)
1987 continue;
1988 bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
1989 }
1990
1991 _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
1992
1993 _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
1994
1995 for (path = 0; path < pathbound; path++) {
1996 if (path == RF90_PATH_A) {
1997 offset = 0xb00;
1998 for (index = 0; index < 11; index++) {
1999 rtl_set_bbreg(hw, offset, MASKDWORD,
2000 apk_normal_setting_value_1
2001 [index]);
2002
2003 offset += 0x04;
2004 }
2005
2006 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
2007
2008 offset = 0xb68;
2009 for (; index < 13; index++) {
2010 rtl_set_bbreg(hw, offset, MASKDWORD,
2011 apk_normal_setting_value_1
2012 [index]);
2013
2014 offset += 0x04;
2015 }
2016
2017 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
2018
2019 offset = 0xb00;
2020 for (index = 0; index < 16; index++) {
2021 rtl_set_bbreg(hw, offset, MASKDWORD,
2022 apk_normal_setting_value_2
2023 [index]);
2024
2025 offset += 0x04;
2026 }
2027 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2028 } else if (path == RF90_PATH_B) {
2029 offset = 0xb70;
2030 for (index = 0; index < 10; index++) {
2031 rtl_set_bbreg(hw, offset, MASKDWORD,
2032 apk_normal_setting_value_1
2033 [index]);
2034
2035 offset += 0x04;
2036 }
2037 rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
2038 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
2039
2040 offset = 0xb68;
2041 index = 11;
2042 for (; index < 13; index++) {
2043 rtl_set_bbreg(hw, offset, MASKDWORD,
2044 apk_normal_setting_value_1
2045 [index]);
2046
2047 offset += 0x04;
2048 }
2049
2050 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
2051
2052 offset = 0xb60;
2053 for (index = 0; index < 16; index++) {
2054 rtl_set_bbreg(hw, offset, MASKDWORD,
2055 apk_normal_setting_value_2
2056 [index]);
2057
2058 offset += 0x04;
2059 }
2060 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2061 }
2062
2063 reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
2064 0xd, MASKDWORD);
2065
2066 for (index = 0; index < APK_AFE_REG_NUM; index++)
2067 rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
2068 afe_on_off[path]);
2069
2070 if (path == RF90_PATH_A) {
2071 for (index = 0; index < APK_BB_REG_NUM; index++) {
2072 if (index == 0)
2073 continue;
2074 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
2075 bb_ap_mode[index]);
2076 }
2077 }
2078
2079 _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
2080
2081 if (path == 0) {
2082 rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
2083 } else {
2084 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
2085 0x10000);
2086 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
2087 0x1000f);
2088 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
2089 0x20103);
2090 }
2091
2092 delta_offset = ((delta + 14) / 2);
2093 if (delta_offset < 0)
2094 delta_offset = 0;
2095 else if (delta_offset > 12)
2096 delta_offset = 12;
2097
2098 for (index = 0; index < APK_BB_REG_NUM; index++) {
2099 if (index != 1)
2100 continue;
2101
2102 tmpreg = apk_rf_init_value[path][index];
2103
2104 if (!rtlefuse->b_apk_thermalmeterignore) {
2105 bb_offset = (tmpreg & 0xF0000) >> 16;
2106
2107 if (!(tmpreg & BIT(15)))
2108 bb_offset = -bb_offset;
2109
2110 delta_v =
2111 apk_delta_mapping[index][delta_offset];
2112
2113 bb_offset += delta_v;
2114
2115 if (bb_offset < 0) {
2116 tmpreg = tmpreg & (~BIT(15));
2117 bb_offset = -bb_offset;
2118 } else {
2119 tmpreg = tmpreg | BIT(15);
2120 }
2121
2122 tmpreg =
2123 (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
2124 }
2125
2126 rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
2127 MASKDWORD, 0x8992e);
2128 rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
2129 MASKDWORD, apk_rf_value_0[path][index]);
2130 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
2131 MASKDWORD, tmpreg);
2132
2133 i = 0;
2134 do {
2135 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
2136 rtl_set_bbreg(hw, apk_offset[path],
2137 MASKDWORD, apk_value[0]);
2138 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2139 ("PHY_APCalibrate() offset 0x%x "
2140 "value 0x%x\n",
2141 apk_offset[path],
2142 rtl_get_bbreg(hw, apk_offset[path],
2143 MASKDWORD)));
2144
2145 mdelay(3);
2146
2147 rtl_set_bbreg(hw, apk_offset[path],
2148 MASKDWORD, apk_value[1]);
2149 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2150 ("PHY_APCalibrate() offset 0x%x "
2151 "value 0x%x\n",
2152 apk_offset[path],
2153 rtl_get_bbreg(hw, apk_offset[path],
2154 MASKDWORD)));
2155
2156 mdelay(20);
2157
2158 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2159
2160 if (path == RF90_PATH_A)
2161 tmpreg = rtl_get_bbreg(hw, 0xbd8,
2162 0x03E00000);
2163 else
2164 tmpreg = rtl_get_bbreg(hw, 0xbd8,
2165 0xF8000000);
2166
2167 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2168 ("PHY_APCalibrate() offset "
2169 "0xbd8[25:21] %x\n", tmpreg));
2170
2171 i++;
2172
2173 } while (tmpreg > apkbound && i < 4);
2174
2175 apk_result[path][index] = tmpreg;
2176 }
2177 }
2178
2179 _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
2180
2181 for (index = 0; index < APK_BB_REG_NUM; index++) {
2182 if (index == 0)
2183 continue;
2184 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
2185 }
2186
2187 _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
2188
2189 for (path = 0; path < pathbound; path++) {
2190 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
2191 MASKDWORD, reg_d[path]);
2192
2193 if (path == RF90_PATH_B) {
2194 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
2195 0x1000f);
2196 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
2197 0x20101);
2198 }
2199
2200 if (apk_result[path][1] > 6)
2201 apk_result[path][1] = 6;
2202 }
2203
2204 for (path = 0; path < pathbound; path++) {
2205 rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
2206 ((apk_result[path][1] << 15) |
2207 (apk_result[path][1] << 10) |
2208 (apk_result[path][1] << 5) |
2209 apk_result[path][1]));
2210
2211 if (path == RF90_PATH_A)
2212 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
2213 ((apk_result[path][1] << 15) |
2214 (apk_result[path][1] << 10) |
2215 (0x00 << 5) | 0x05));
2216 else
2217 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
2218 ((apk_result[path][1] << 15) |
2219 (apk_result[path][1] << 10) |
2220 (0x02 << 5) | 0x05));
2221
2222 rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
2223 ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
2224 0x08));
2225
2226 }
2227
2228 rtlphy->b_apk_done = true;
2229#endif
2230}
2231
2232static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
2233 bool bmain, bool is2t)
2234{
2235 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2236
2237 if (is_hal_stop(rtlhal)) {
2238 rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
2239 rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
2240 }
2241 if (is2t) {
2242 if (bmain)
2243 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
2244 BIT(5) | BIT(6), 0x1);
2245 else
2246 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
2247 BIT(5) | BIT(6), 0x2);
2248 } else {
2249 if (bmain)
2250 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
2251 else
2252 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
2253
2254 }
2255}
2256
2257#undef IQK_ADDA_REG_NUM
2258#undef IQK_DELAY_TIME
2259
2260void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
2261{
2262 struct rtl_priv *rtlpriv = rtl_priv(hw);
2263 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2264 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2265
2266 long result[4][8];
2267 u8 i, final_candidate;
2268 bool b_patha_ok, b_pathb_ok;
2269 long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
2270 reg_ecc, reg_tmp = 0;
2271 bool is12simular, is13simular, is23simular;
2272 bool b_start_conttx = false, b_singletone = false;
2273 u32 iqk_bb_reg[10] = {
2274 ROFDM0_XARXIQIMBALANCE,
2275 ROFDM0_XBRXIQIMBALANCE,
2276 ROFDM0_ECCATHRESHOLD,
2277 ROFDM0_AGCRSSITABLE,
2278 ROFDM0_XATXIQIMBALANCE,
2279 ROFDM0_XBTXIQIMBALANCE,
2280 ROFDM0_XCTXIQIMBALANCE,
2281 ROFDM0_XCTXAFE,
2282 ROFDM0_XDTXAFE,
2283 ROFDM0_RXIQEXTANTA
2284 };
2285
2286 if (b_recovery) {
2287 _rtl92c_phy_reload_adda_registers(hw,
2288 iqk_bb_reg,
2289 rtlphy->iqk_bb_backup, 10);
2290 return;
2291 }
2292 if (b_start_conttx || b_singletone)
2293 return;
2294 for (i = 0; i < 8; i++) {
2295 result[0][i] = 0;
2296 result[1][i] = 0;
2297 result[2][i] = 0;
2298 result[3][i] = 0;
2299 }
2300 final_candidate = 0xff;
2301 b_patha_ok = false;
2302 b_pathb_ok = false;
2303 is12simular = false;
2304 is23simular = false;
2305 is13simular = false;
2306 for (i = 0; i < 3; i++) {
2307 if (IS_92C_SERIAL(rtlhal->version))
2308 _rtl92c_phy_iq_calibrate(hw, result, i, true);
2309 else
2310 _rtl92c_phy_iq_calibrate(hw, result, i, false);
2311 if (i == 1) {
2312 is12simular = _rtl92c_phy_simularity_compare(hw,
2313 result, 0,
2314 1);
2315 if (is12simular) {
2316 final_candidate = 0;
2317 break;
2318 }
2319 }
2320 if (i == 2) {
2321 is13simular = _rtl92c_phy_simularity_compare(hw,
2322 result, 0,
2323 2);
2324 if (is13simular) {
2325 final_candidate = 0;
2326 break;
2327 }
2328 is23simular = _rtl92c_phy_simularity_compare(hw,
2329 result, 1,
2330 2);
2331 if (is23simular)
2332 final_candidate = 1;
2333 else {
2334 for (i = 0; i < 8; i++)
2335 reg_tmp += result[3][i];
2336
2337 if (reg_tmp != 0)
2338 final_candidate = 3;
2339 else
2340 final_candidate = 0xFF;
2341 }
2342 }
2343 }
2344 for (i = 0; i < 4; i++) {
2345 reg_e94 = result[i][0];
2346 reg_e9c = result[i][1];
2347 reg_ea4 = result[i][2];
2348 reg_eac = result[i][3];
2349 reg_eb4 = result[i][4];
2350 reg_ebc = result[i][5];
2351 reg_ec4 = result[i][6];
2352 reg_ecc = result[i][7];
2353 }
2354 if (final_candidate != 0xff) {
2355 rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
2356 rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
2357 reg_ea4 = result[final_candidate][2];
2358 reg_eac = result[final_candidate][3];
2359 rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
2360 rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
2361 reg_ec4 = result[final_candidate][6];
2362 reg_ecc = result[final_candidate][7];
2363 b_patha_ok = b_pathb_ok = true;
2364 } else {
2365 rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
2366 rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
2367 }
2368 if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
2369 _rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result,
2370 final_candidate,
2371 (reg_ea4 == 0));
2372 if (IS_92C_SERIAL(rtlhal->version)) {
2373 if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
2374 _rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok,
2375 result,
2376 final_candidate,
2377 (reg_ec4 == 0));
2378 }
2379 _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
2380 rtlphy->iqk_bb_backup, 10);
2381}
2382
2383void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
2384{
2385 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2386 bool b_start_conttx = false, b_singletone = false;
2387
2388 if (b_start_conttx || b_singletone)
2389 return;
2390 if (IS_92C_SERIAL(rtlhal->version))
2391 _rtl92c_phy_lc_calibrate(hw, true);
2392 else
2393 _rtl92c_phy_lc_calibrate(hw, false);
2394}
2395
2396void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
2397{
2398 struct rtl_priv *rtlpriv = rtl_priv(hw);
2399 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2400 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2401
2402 if (rtlphy->b_apk_done)
2403 return;
2404 if (IS_92C_SERIAL(rtlhal->version))
2405 _rtl92c_phy_ap_calibrate(hw, delta, true);
2406 else
2407 _rtl92c_phy_ap_calibrate(hw, delta, false);
2408}
2409
2410void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
2411{
2412 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2413
2414 if (IS_92C_SERIAL(rtlhal->version))
2415 _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
2416 else
2417 _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
2418}
2419
2420bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
2421{
2422 struct rtl_priv *rtlpriv = rtl_priv(hw);
2423 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2424 bool b_postprocessing = false;
2425
2426 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2427 ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
2428 iotype, rtlphy->set_io_inprogress));
2429 do {
2430 switch (iotype) {
2431 case IO_CMD_RESUME_DM_BY_SCAN:
2432 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2433 ("[IO CMD] Resume DM after scan.\n"));
2434 b_postprocessing = true;
2435 break;
2436 case IO_CMD_PAUSE_DM_BY_SCAN:
2437 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2438 ("[IO CMD] Pause DM before scan.\n"));
2439 b_postprocessing = true;
2440 break;
2441 default:
2442 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2443 ("switch case not process\n"));
2444 break;
2445 }
2446 } while (false);
2447 if (b_postprocessing && !rtlphy->set_io_inprogress) {
2448 rtlphy->set_io_inprogress = true;
2449 rtlphy->current_io_type = iotype;
2450 } else {
2451 return false;
2452 }
2453 rtl92c_phy_set_io(hw);
2454 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
2455 return true;
2456}
2457
2458void rtl92c_phy_set_io(struct ieee80211_hw *hw)
2459{
2460 struct rtl_priv *rtlpriv = rtl_priv(hw);
2461 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2462
2463 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2464 ("--->Cmd(%#x), set_io_inprogress(%d)\n",
2465 rtlphy->current_io_type, rtlphy->set_io_inprogress));
2466 switch (rtlphy->current_io_type) {
2467 case IO_CMD_RESUME_DM_BY_SCAN:
2468 dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
2469 rtl92c_dm_write_dig(hw);
2470 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
2471 break;
2472 case IO_CMD_PAUSE_DM_BY_SCAN:
2473 rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
2474 dm_digtable.cur_igvalue = 0x17;
2475 rtl92c_dm_write_dig(hw);
2476 break;
2477 default:
2478 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2479 ("switch case not process\n"));
2480 break;
2481 }
2482 rtlphy->set_io_inprogress = false;
2483 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2484 ("<---(%#x)\n", rtlphy->current_io_type));
2485}
2486
2487void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
2488{
2489 struct rtl_priv *rtlpriv = rtl_priv(hw);
2490
2491 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
2492 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2493 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
2494 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2495 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2496 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2497}
2498
2499static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
2500{
2501 u32 u4b_tmp;
2502 u8 delay = 5;
2503 struct rtl_priv *rtlpriv = rtl_priv(hw);
2504
2505 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
2506 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
2507 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
2508 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
2509 while (u4b_tmp != 0 && delay > 0) {
2510 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
2511 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
2512 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
2513 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
2514 delay--;
2515 }
2516 if (delay == 0) {
2517 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
2518 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2519 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2520 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2521 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
2522 ("Switch RF timeout !!!.\n"));
2523 return;
2524 }
2525 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2526 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
2527}
2528
2529 483 static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
2530 484 					    enum rf_pwrstate rfpwr_state)
2531 485 {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
index ca4daee6e9a..3fc60e434ce 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -57,8 +57,6 @@
57 57 #define IQK_MAC_REG_NUM 4
58 58
59 59 #define RF90_PATH_MAX 2
60#define CHANNEL_MAX_NUMBER 14
61#define CHANNEL_GROUP_MAX 3
62 60
63 61 #define CT_OFFSET_MAC_ADDR 0X16
64 62
@@ -78,9 +76,7 @@
78 76 #define CT_OFFSET_CUSTOMER_ID 0x7F
79 77
80 78 #define RTL92C_MAX_PATH_NUM 2
81 #define CHANNEL_MAX_NUMBER 14
79 #define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
82#define CHANNEL_GROUP_MAX 3
83
84 80 enum swchnlcmd_id {
85 81 	CMDID_END,
86 82 	CMDID_SET_TXPOWEROWER_LEVEL,
@@ -233,5 +229,6 @@ void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw);
233 229 void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
234 230 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
235 231 void rtl92c_phy_set_io(struct ieee80211_hw *hw);
232void rtl92c_bb_block_on(struct ieee80211_hw *hw);
236 233
237 234 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index 875d5146522..b0868a61384 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -63,7 +63,15 @@
63 63 #define REG_LEDCFG3 0x004F
64 64 #define REG_FSIMR 0x0050
65 65 #define REG_FSISR 0x0054
66 66#define REG_HSIMR 0x0058
67#define REG_HSISR 0x005c
68
69/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
70#define REG_GPIO_PIN_CTRL_2 0x0060
71/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
72#define REG_GPIO_IO_SEL_2 0x0062
73/* RTL8723 WIFI/BT/GPS Multi-Function control source. */
74#define REG_MULTI_FUNC_CTRL 0x0068
67 75 #define REG_MCUFWDL 0x0080
68 76
69 77 #define REG_HMEBOX_EXT_0 0x0088
@@ -79,6 +87,7 @@
79 87 #define REG_PCIE_MIO_INTD 0x00E8
80 88 #define REG_HPON_FSM 0x00EC
81 89 #define REG_SYS_CFG 0x00F0
90#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only.*/
82 91
83 92 #define REG_CR 0x0100
84 93 #define REG_PBP 0x0104
@@ -209,6 +218,8 @@
209 218 #define REG_RDG_PIFS 0x0513
210 219 #define REG_SIFS_CTX 0x0514
211 220 #define REG_SIFS_TRX 0x0516
221#define REG_SIFS_CCK 0x0514
222#define REG_SIFS_OFDM 0x0516
212 223 #define REG_AGGR_BREAK_TIME 0x051A
213 224 #define REG_SLOT 0x051B
214 225 #define REG_TX_PTCL_CTRL 0x0520
@@ -261,6 +272,10 @@
261 272 #define REG_MAC_SPEC_SIFS 0x063A
262 273 #define REG_RESP_SIFS_CCK 0x063C
263 274 #define REG_RESP_SIFS_OFDM 0x063E
275/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
276#define REG_R2T_SIFS 0x063C
277/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
278#define REG_T2T_SIFS 0x063E
264 279 #define REG_ACKTO 0x0640
265 280 #define REG_CTS2TO 0x0641
266 281 #define REG_EIFS 0x0642
@@ -641,9 +656,10 @@
641 656 #define STOPBE BIT(1)
642 657 #define STOPBK BIT(0)
643 658
644 #define RCR_APPFCS BIT(31)
659 #define RCR_APP_FCS BIT(31)
645 660 #define RCR_APP_MIC BIT(30)
646 661 #define RCR_APP_ICV BIT(29)
662#define RCR_APP_PHYSTS BIT(28)
647 663 #define RCR_APP_PHYST_RXFF BIT(28)
648 664 #define RCR_APP_BA_SSN BIT(27)
649 665 #define RCR_ENMBID BIT(24)
@@ -759,6 +775,7 @@
759 775
760 776 #define BOOT_FROM_EEPROM BIT(4)
761 777 #define EEPROM_EN BIT(5)
778#define EEPROMSEL BOOT_FROM_EEPROM
762 779
763 780 #define AFE_BGEN BIT(0)
764 781 #define AFE_MBEN BIT(1)
@@ -876,6 +893,8 @@
876 893 #define BD_MAC2 BIT(9)
877 894 #define BD_MAC1 BIT(10)
878 895 #define IC_MACPHY_MODE BIT(11)
896#define BT_FUNC BIT(16)
897#define VENDOR_ID BIT(19)
879 898 #define PAD_HWPD_IDN BIT(22)
880 899 #define TRP_VAUX_EN BIT(23)
881 900 #define TRP_BT_EN BIT(24)
@@ -883,6 +902,28 @@
883 902 #define BD_HCI_SEL BIT(26)
884 903 #define TYPE_ID BIT(27)
885 904
905/* REG_GPIO_OUTSTS (For RTL8723 only) */
906#define EFS_HCI_SEL (BIT(0)|BIT(1))
907#define PAD_HCI_SEL (BIT(2)|BIT(3))
908#define HCI_SEL (BIT(4)|BIT(5))
909#define PKG_SEL_HCI BIT(6)
910#define FEN_GPS BIT(7)
911#define FEN_BT BIT(8)
912#define FEN_WL BIT(9)
913#define FEN_PCI BIT(10)
914#define FEN_USB BIT(11)
915#define BTRF_HWPDN_N BIT(12)
916#define WLRF_HWPDN_N BIT(13)
917#define PDN_BT_N BIT(14)
918#define PDN_GPS_N BIT(15)
919#define BT_CTL_HWPDN BIT(16)
920#define GPS_CTL_HWPDN BIT(17)
921#define PPHY_SUSB BIT(20)
922#define UPHY_SUSB BIT(21)
923#define PCI_SUSEN BIT(22)
924#define USB_SUSEN BIT(23)
925#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
926
886 927 #define CHIP_VER_RTL_MASK 0xF000
887 928 #define CHIP_VER_RTL_SHIFT 12
888 929
@@ -1035,7 +1076,7 @@
1035 1076 #define _RARF_RC7(x) (((x) & 0x1F) << 16)
1036 1077 #define _RARF_RC8(x) (((x) & 0x1F) << 24)
1037 1078
1038 #define AC_PARAM_TXOP_LIMIT_OFFSET 16
1079 #define AC_PARAM_TXOP_OFFSET 16
1039 1080 #define AC_PARAM_ECW_MAX_OFFSET 12
1040 1081 #define AC_PARAM_ECW_MIN_OFFSET 8
1041 1082 #define AC_PARAM_AIFS_OFFSET 0
@@ -1184,6 +1225,30 @@
1184 1225
1185 1226 #define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1186 1227
1228/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
1229/* Enable GPIO[9] as WiFi HW PDn source */
1230#define WL_HWPDN_EN BIT(0)
1231/* WiFi HW PDn polarity control */
1232#define WL_HWPDN_SL BIT(1)
1233/* WiFi function enable */
1234#define WL_FUNC_EN BIT(2)
1235/* Enable GPIO[9] as WiFi RF HW PDn source */
1236#define WL_HWROF_EN BIT(3)
1237/* Enable GPIO[11] as BT HW PDn source */
1238#define BT_HWPDN_EN BIT(16)
1239/* BT HW PDn polarity control */
1240#define BT_HWPDN_SL BIT(17)
1241/* BT function enable */
1242#define BT_FUNC_EN BIT(18)
1243/* Enable GPIO[11] as BT/GPS RF HW PDn source */
1244#define BT_HWROF_EN BIT(19)
1245/* Enable GPIO[10] as GPS HW PDn source */
1246#define GPS_HWPDN_EN BIT(20)
1247/* GPS HW PDn polarity control */
1248#define GPS_HWPDN_SL BIT(21)
1249/* GPS function enable */
1250#define GPS_FUNC_EN BIT(22)
1251
1187 1252 #define RPMAC_RESET 0x100
1188 1253 #define RPMAC_TXSTART 0x104
1189 1254 #define RPMAC_TXLEGACYSIG 0x108
@@ -1496,7 +1561,7 @@
1496 1561 #define BTXHTSTBC 0x30
1497 1562 #define BTXHTADVANCECODING 0x40
1498 1563 #define BTXHTSHORTGI 0x80
1499 #define BTXHTNUMBERHT_LT F 0x300
1564 #define BTXHTNUMBERHT_LTF 0x300
1500 1565 #define BTXHTCRC8 0x3fc00
1501 1566 #define BCOUNTERRESET 0x10000
1502 1567 #define BNUMOFOFDMTX 0xffff
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index b366e886292..b4df0b33283 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -46,13 +46,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
46 46 	struct rtl_priv *rtlpriv = rtl_priv(hw);
47 47 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
48 48
49 	rtlpriv->dm.b_dm_initialgain_enable = 1;
49 	rtlpriv->dm.dm_initialgain_enable = 1;
50 50 	rtlpriv->dm.dm_flag = 0;
51 	rtlpriv->dm.b_disable_framebursting = 0;;
51 	rtlpriv->dm.disable_framebursting = 0;
52 52 	rtlpriv->dm.thermalvalue = 0;
53 53 	rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
54 54
55 	rtlpci->receive_config = (RCR_APPFCS |
55 	rtlpci->receive_config = (RCR_APP_FCS |
56 56 				  RCR_AMF |
57 57 				  RCR_ADF |
58 58 				  RCR_APP_MIC |
@@ -135,6 +135,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
135 135 	.set_bbreg = rtl92c_phy_set_bb_reg,
136 136 	.get_rfreg = rtl92c_phy_query_rf_reg,
137 137 	.set_rfreg = rtl92c_phy_set_rf_reg,
138 .cmd_send_packet = _rtl92c_cmd_send_packet,
138 139 };
139 140
140 141 static struct rtl_mod_params rtl92ce_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
index de1198c38d4..0568d6dc83d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -33,5 +33,7 @@
33 33 int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
34 34 void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
35 35 void rtl92c_init_var_map(struct ieee80211_hw *hw);
36bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
37 struct sk_buff *skb);
36 38
37 39 #endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index bf5852f2d63..01b95427fee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -36,7 +36,7 @@
36 36 #include "trx.h"
37 37 #include "led.h"
38 38
39 static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(u16 fc,
39 static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc,
40 40 							  unsigned int
41 41 							  skb_queue)
42 42 {
@@ -245,9 +245,9 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
245 245 					struct rtl_stats *pstats,
246 246 					struct rx_desc_92c *pdesc,
247 247 					struct rx_fwinfo_92c *p_drvinfo,
248 					bool bpacket_match_bssid,
248 					bool packet_match_bssid,
249 					bool bpacket_toself,
249 					bool packet_toself,
250 					bool b_packet_beacon)
250 					bool packet_beacon)
251 251 {
252 252 	struct rtl_priv *rtlpriv = rtl_priv(hw);
253 253 	struct phy_sts_cck_8192s_t *cck_buf;
@@ -258,11 +258,11 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
258 258 	bool is_cck_rate;
259 259
260 260 	is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
261 	pstats->b_packet_matchbssid = bpacket_match_bssid;
261 	pstats->packet_matchbssid = packet_match_bssid;
262 	pstats->b_packet_toself = bpacket_toself;
262 	pstats->packet_toself = packet_toself;
263 	pstats->b_is_cck = is_cck_rate;
263 	pstats->is_cck = is_cck_rate;
264 	pstats->b_packet_beacon = b_packet_beacon;
264 	pstats->packet_beacon = packet_beacon;
265 	pstats->b_is_cck = is_cck_rate;
265 	pstats->is_cck = is_cck_rate;
266 266 	pstats->rx_mimo_signalquality[0] = -1;
267 267 	pstats->rx_mimo_signalquality[1] = -1;
268 268
@@ -315,7 +315,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
315 315 	pstats->rx_pwdb_all = pwdb_all;
316 316 	pstats->recvsignalpower = rx_pwr_all;
317 317
318 	if (bpacket_match_bssid) {
318 	if (packet_match_bssid) {
319 319 		u8 sq;
320 320 		if (pstats->rx_pwdb_all > 40)
321 321 			sq = 100;
@@ -334,10 +334,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
334 334 				pstats->rx_mimo_signalquality[1] = -1;
335 } 335 }
336 } else { 336 } else {
337 rtlpriv->dm.brfpath_rxenable[0] = 337 rtlpriv->dm.rfpath_rxenable[0] =
338 rtlpriv->dm.brfpath_rxenable[1] = true; 338 rtlpriv->dm.rfpath_rxenable[1] = true;
339 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) { 339 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
340 if (rtlpriv->dm.brfpath_rxenable[i]) 340 if (rtlpriv->dm.rfpath_rxenable[i])
341 rf_rx_num++; 341 rf_rx_num++;
342 342
343 rx_pwr[i] = 343 rx_pwr[i] =
@@ -347,7 +347,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
347 347 			rtlpriv->stats.rx_snr_db[i] =
348 348 			    (long)(p_drvinfo->rxsnr[i] / 2);
349 349
350 			if (bpacket_match_bssid)
350 			if (packet_match_bssid)
351 351 				pstats->rx_mimo_signalstrength[i] = (u8) rssi;
352 352 		}
353 353
@@ -366,7 +366,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
366 366 		for (i = 0; i < max_spatial_stream; i++) {
367 367 			evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
368 368
369 			if (bpacket_match_bssid) {
369 			if (packet_match_bssid) {
370 370 				if (i == 0)
371 371 					pstats->signalquality =
372 372 					    (u8) (evm & 0xff);
@@ -393,7 +393,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
393 u8 rfpath; 393 u8 rfpath;
394 u32 last_rssi, tmpval; 394 u32 last_rssi, tmpval;
395 395
396 if (pstats->b_packet_toself || pstats->b_packet_beacon) { 396 if (pstats->packet_toself || pstats->packet_beacon) {
397 rtlpriv->stats.rssi_calculate_cnt++; 397 rtlpriv->stats.rssi_calculate_cnt++;
398 398
399 if (rtlpriv->stats.ui_rssi.total_num++ >= 399 if (rtlpriv->stats.ui_rssi.total_num++ >=
@@ -421,7 +421,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
421 pstats->rssi = rtlpriv->stats.signal_strength; 421 pstats->rssi = rtlpriv->stats.signal_strength;
422 } 422 }
423 423
424 if (!pstats->b_is_cck && pstats->b_packet_toself) { 424 if (!pstats->is_cck && pstats->packet_toself) {
425 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; 425 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
426 rfpath++) { 426 rfpath++) {
427 427
@@ -493,7 +493,7 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
493 rtlpriv->dm.undecorated_smoothed_pwdb; 493 rtlpriv->dm.undecorated_smoothed_pwdb;
494 } 494 }
495 495
496 if (pstats->b_packet_toself || pstats->b_packet_beacon) { 496 if (pstats->packet_toself || pstats->packet_beacon) {
497 if (undecorated_smoothed_pwdb < 0) 497 if (undecorated_smoothed_pwdb < 0)
498 undecorated_smoothed_pwdb = pstats->rx_pwdb_all; 498 undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
499 499
@@ -525,7 +525,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
525 u32 last_evm, n_spatialstream, tmpval; 525 u32 last_evm, n_spatialstream, tmpval;
526 526
527 if (pstats->signalquality != 0) { 527 if (pstats->signalquality != 0) {
528 if (pstats->b_packet_toself || pstats->b_packet_beacon) { 528 if (pstats->packet_toself || pstats->packet_beacon) {
529 529
530 if (rtlpriv->stats.ui_link_quality.total_num++ >= 530 if (rtlpriv->stats.ui_link_quality.total_num++ >=
531 PHY_LINKQUALITY_SLID_WIN_MAX) { 531 PHY_LINKQUALITY_SLID_WIN_MAX) {
@@ -595,8 +595,8 @@ static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
595 struct rtl_stats *pcurrent_stats) 595 struct rtl_stats *pcurrent_stats)
596{ 596{
597 597
598 if (!pcurrent_stats->b_packet_matchbssid && 598 if (!pcurrent_stats->packet_matchbssid &&
599 !pcurrent_stats->b_packet_beacon) 599 !pcurrent_stats->packet_beacon)
600 return; 600 return;
601 601
602 _rtl92ce_process_ui_rssi(hw, pcurrent_stats); 602 _rtl92ce_process_ui_rssi(hw, pcurrent_stats);
@@ -617,34 +617,36 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
617 u8 *tmp_buf; 617 u8 *tmp_buf;
618 u8 *praddr; 618 u8 *praddr;
619 u8 *psaddr; 619 u8 *psaddr;
620 u16 fc, type; 620 __le16 fc;
621 bool b_packet_matchbssid, b_packet_toself, b_packet_beacon; 621 u16 type, c_fc;
622 bool packet_matchbssid, packet_toself, packet_beacon;
622 623
623 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; 624 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
624 625
625 hdr = (struct ieee80211_hdr *)tmp_buf; 626 hdr = (struct ieee80211_hdr *)tmp_buf;
626 fc = le16_to_cpu(hdr->frame_control); 627 fc = hdr->frame_control;
628 c_fc = le16_to_cpu(fc);
627 type = WLAN_FC_GET_TYPE(fc); 629 type = WLAN_FC_GET_TYPE(fc);
628 praddr = hdr->addr1; 630 praddr = hdr->addr1;
629 psaddr = hdr->addr2; 631 psaddr = hdr->addr2;
630 632
631 b_packet_matchbssid = 633 packet_matchbssid =
632 ((IEEE80211_FTYPE_CTL != type) && 634 ((IEEE80211_FTYPE_CTL != type) &&
633 (!compare_ether_addr(mac->bssid, 635 (!compare_ether_addr(mac->bssid,
634 (fc & IEEE80211_FCTL_TODS) ? 636 (c_fc & IEEE80211_FCTL_TODS) ?
635 hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? 637 hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ?
636 hdr->addr2 : hdr->addr3)) && 638 hdr->addr2 : hdr->addr3)) &&
637 (!pstats->b_hwerror) && (!pstats->b_crc) && (!pstats->b_icv)); 639 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
638 640
639 b_packet_toself = b_packet_matchbssid && 641 packet_toself = packet_matchbssid &&
640 (!compare_ether_addr(praddr, rtlefuse->dev_addr)); 642 (!compare_ether_addr(praddr, rtlefuse->dev_addr));
641 643
642 if (ieee80211_is_beacon(fc)) 644 if (ieee80211_is_beacon(fc))
643 b_packet_beacon = true; 645 packet_beacon = true;
644 646
645 _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, 647 _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
646 b_packet_matchbssid, b_packet_toself, 648 packet_matchbssid, packet_toself,
647 b_packet_beacon); 649 packet_beacon);
648 650
649 _rtl92ce_process_phyinfo(hw, tmp_buf, pstats); 651 _rtl92ce_process_phyinfo(hw, tmp_buf, pstats);
650} 652}
@@ -662,14 +664,14 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
662 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * 664 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
663 RX_DRV_INFO_SIZE_UNIT; 665 RX_DRV_INFO_SIZE_UNIT;
664 stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); 666 stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
665 stats->b_icv = (u16) GET_RX_DESC_ICV(pdesc); 667 stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
666 stats->b_crc = (u16) GET_RX_DESC_CRC32(pdesc); 668 stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
667 stats->b_hwerror = (stats->b_crc | stats->b_icv); 669 stats->hwerror = (stats->crc | stats->icv);
668 stats->decrypted = !GET_RX_DESC_SWDEC(pdesc); 670 stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
669 stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc); 671 stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
670 stats->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); 672 stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
671 stats->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); 673 stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
672 stats->b_isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) 674 stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
673 && (GET_RX_DESC_FAGGR(pdesc) == 1)); 675 && (GET_RX_DESC_FAGGR(pdesc) == 1));
674 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); 676 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
675 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); 677 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
@@ -727,27 +729,24 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
727 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 729 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
728 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 730 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
729 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 731 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
730 bool b_defaultadapter = true; 732 bool defaultadapter = true;
731
732 struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid); 733 struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid);
733
734 u8 *pdesc = (u8 *) pdesc_tx; 734 u8 *pdesc = (u8 *) pdesc_tx;
735 struct rtl_tcb_desc tcb_desc; 735 struct rtl_tcb_desc tcb_desc;
736 u8 *qc = ieee80211_get_qos_ctl(hdr); 736 u8 *qc = ieee80211_get_qos_ctl(hdr);
737 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 737 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
738 u16 seq_number; 738 u16 seq_number;
739 u16 fc = le16_to_cpu(hdr->frame_control); 739 __le16 fc = hdr->frame_control;
740 u8 rate_flag = info->control.rates[0].flags; 740 u8 rate_flag = info->control.rates[0].flags;
741 741
742 enum rtl_desc_qsel fw_qsel = 742 enum rtl_desc_qsel fw_qsel =
743 _rtl92ce_map_hwqueue_to_fwqueue(le16_to_cpu(hdr->frame_control), 743 _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index);
744 queue_index);
745 744
746 bool b_firstseg = ((hdr->seq_ctrl & 745 bool firstseg = ((hdr->seq_ctrl &
747 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); 746 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
748 747
749 bool b_lastseg = ((hdr->frame_control & 748 bool lastseg = ((hdr->frame_control &
750 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); 749 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
751 750
752 dma_addr_t mapping = pci_map_single(rtlpci->pdev, 751 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
753 skb->data, skb->len, 752 skb->data, skb->len,
@@ -759,7 +758,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
759 758
760 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c)); 759 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
761 760
762 if (b_firstseg) { 761 if (firstseg) {
763 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); 762 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
764 763
765 SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate); 764 SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate);
@@ -774,25 +773,25 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
774 } 773 }
775 SET_TX_DESC_SEQ(pdesc, seq_number); 774 SET_TX_DESC_SEQ(pdesc, seq_number);
776 775
777 SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.b_rts_enable && 776 SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable &&
778 !tcb_desc. 777 !tcb_desc.
779 b_cts_enable) ? 1 : 0)); 778 cts_enable) ? 1 : 0));
780 SET_TX_DESC_HW_RTS_ENABLE(pdesc, 779 SET_TX_DESC_HW_RTS_ENABLE(pdesc,
781 ((tcb_desc.b_rts_enable 780 ((tcb_desc.rts_enable
782 || tcb_desc.b_cts_enable) ? 1 : 0)); 781 || tcb_desc.cts_enable) ? 1 : 0));
783 SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.b_cts_enable) ? 1 : 0)); 782 SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0));
784 SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.b_rts_stbc) ? 1 : 0)); 783 SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
785 784
786 SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate); 785 SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate);
787 SET_TX_DESC_RTS_BW(pdesc, 0); 786 SET_TX_DESC_RTS_BW(pdesc, 0);
788 SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc); 787 SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc);
789 SET_TX_DESC_RTS_SHORT(pdesc, 788 SET_TX_DESC_RTS_SHORT(pdesc,
790 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ? 789 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
791 (tcb_desc.b_rts_use_shortpreamble ? 1 : 0) 790 (tcb_desc.rts_use_shortpreamble ? 1 : 0)
792 : (tcb_desc.b_rts_use_shortgi ? 1 : 0))); 791 : (tcb_desc.rts_use_shortgi ? 1 : 0)));
793 792
794 if (mac->bw_40) { 793 if (mac->bw_40) {
795 if (tcb_desc.b_packet_bw) { 794 if (tcb_desc.packet_bw) {
796 SET_TX_DESC_DATA_BW(pdesc, 1); 795 SET_TX_DESC_DATA_BW(pdesc, 1);
797 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); 796 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
798 } else { 797 } else {
@@ -854,14 +853,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
854 } 853 }
855 } 854 }
856 855
857 SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0)); 856 SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
858 SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0)); 857 SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
859 858
860 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); 859 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
861 860
862 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); 861 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
863 862
864 if (rtlpriv->dm.b_useramask) { 863 if (rtlpriv->dm.useramask) {
865 SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index); 864 SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index);
866 SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id); 865 SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id);
867 } else { 866 } else {
@@ -869,16 +868,16 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
869 SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index); 868 SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index);
870 } 869 }
871 870
872 if ((!ieee80211_is_data_qos(fc)) && ppsc->b_leisure_ps && 871 if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
873 ppsc->b_fwctrl_lps) { 872 ppsc->fwctrl_lps) {
874 SET_TX_DESC_HWSEQ_EN(pdesc, 1); 873 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
875 SET_TX_DESC_PKT_ID(pdesc, 8); 874 SET_TX_DESC_PKT_ID(pdesc, 8);
876 875
877 if (!b_defaultadapter) 876 if (!defaultadapter)
878 SET_TX_DESC_QOS(pdesc, 1); 877 SET_TX_DESC_QOS(pdesc, 1);
879 } 878 }
880 879
881 SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1)); 880 SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
882 881
883 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || 882 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
884 is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { 883 is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
@@ -889,8 +888,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
889} 888}
890 889
891void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, 890void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
892 u8 *pdesc, bool b_firstseg, 891 u8 *pdesc, bool firstseg,
893 bool b_lastseg, struct sk_buff *skb) 892 bool lastseg, struct sk_buff *skb)
894{ 893{
895 struct rtl_priv *rtlpriv = rtl_priv(hw); 894 struct rtl_priv *rtlpriv = rtl_priv(hw);
896 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 895 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -901,11 +900,11 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
901 PCI_DMA_TODEVICE); 900 PCI_DMA_TODEVICE);
902 901
903 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); 902 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
904 u16 fc = le16_to_cpu(hdr->frame_control); 903 __le16 fc = hdr->frame_control;
905 904
906 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); 905 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
907 906
908 if (b_firstseg) 907 if (firstseg)
909 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); 908 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
910 909
911 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); 910 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
@@ -1029,3 +1028,36 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
1029 BIT(0) << (hw_queue)); 1028 BIT(0) << (hw_queue));
1030 } 1029 }
1031} 1030}
1031
1032bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
1033 struct sk_buff *skb)
1034{
1035 struct rtl_priv *rtlpriv = rtl_priv(hw);
1036 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1037 struct rtl8192_tx_ring *ring;
1038 struct rtl_tx_desc *pdesc;
1039 u8 own;
1040 unsigned long flags;
1041 struct sk_buff *pskb = NULL;
1042
1043 ring = &rtlpci->tx_ring[BEACON_QUEUE];
1044
1045 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1046
1047 pskb = __skb_dequeue(&ring->queue);
1048 if (pskb)
1049 kfree_skb(pskb);
1050
1051 pdesc = &ring->desc[0];
1052 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
1053
1054 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
1055
1056 __skb_queue_tail(&ring->queue, skb);
1057
1058 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1059
1060 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
1061
1062 return true;
1063}
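The new _rtl92c_cmd_send_packet() helper recycles descriptor 0 of the beacon ring for driver-generated command frames. A hypothetical caller might look like the sketch below (illustrative only; the wrapper name and payload handling are assumptions, not part of this commit):

static bool example_send_cmd_blob(struct ieee80211_hw *hw,
				  const u8 *buf, u32 len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (!skb)
		return false;
	memcpy(skb_put(skb, len), buf, len);	/* copy command payload */
	return _rtl92c_cmd_send_packet(hw, skb);	/* queued on BEACON_QUEUE */
}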
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index 53d0e0a5af5..803adcc80c9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -40,470 +40,494 @@
40#define USB_HWDESC_HEADER_LEN 32 40#define USB_HWDESC_HEADER_LEN 32
41#define CRCLENGTH 4 41#define CRCLENGTH 4
42 42
43/* Define a macro that takes a le32 word, converts it to host ordering,
44 * right shifts by a specified count, creates a mask of the specified
45 * bit count, and extracts that number of bits.
46 */
47
48#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
49 ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
50 BIT_LEN_MASK_32(__mask))
51
52/* Define a macro that clears a bit field in an le32 word and
53 * sets the specified value into that bit field. The resulting
54 * value remains in le32 ordering; however, it is properly converted
55 * to host ordering for the clear and set operations before conversion
56 * back to le32.
57 */
58
59#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
60 (*(__le32 *)(__pdesc) = \
61 (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
62 (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
63 (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
64
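For readers unfamiliar with these helpers, the stand-alone sketch below (not part of the commit) shows the intended round trip on one little-endian descriptor dword; BIT_LEN_MASK_32 and BIT_OFFSET_LEN_MASK_32 are assumed to come from the rtlwifi headers, and the 16/8 field position simply mirrors the OFFSET field used later in this file:

/* Illustrative only: read and update the 8-bit field at bits 16..23
 * of the first descriptor dword, keeping the storage in __le32. */
static inline u32 example_get_offset(u8 *pdesc)
{
	return SHIFT_AND_MASK_LE(pdesc, 16, 8);	/* extract bits 16..23 */
}

static inline void example_set_offset(u8 *pdesc, u32 val)
{
	SET_BITS_OFFSET_LE(pdesc, 16, 8, val);	/* clear field, insert val */
}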
65/* macros to read/write various fields in RX or TX descriptors */
66
43#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ 67#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
44 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) 68 SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
45#define SET_TX_DESC_OFFSET(__pdesc, __val) \ 69#define SET_TX_DESC_OFFSET(__pdesc, __val) \
46 SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) 70 SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
47#define SET_TX_DESC_BMC(__pdesc, __val) \ 71#define SET_TX_DESC_BMC(__pdesc, __val) \
48 SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) 72 SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
49#define SET_TX_DESC_HTC(__pdesc, __val) \ 73#define SET_TX_DESC_HTC(__pdesc, __val) \
50 SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) 74 SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
51#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ 75#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
52 SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) 76 SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
53#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ 77#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
54 SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) 78 SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
55#define SET_TX_DESC_LINIP(__pdesc, __val) \ 79#define SET_TX_DESC_LINIP(__pdesc, __val) \
56 SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) 80 SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
57#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ 81#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
58 SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) 82 SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
59#define SET_TX_DESC_GF(__pdesc, __val) \ 83#define SET_TX_DESC_GF(__pdesc, __val) \
60 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) 84 SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
61#define SET_TX_DESC_OWN(__pdesc, __val) \ 85#define SET_TX_DESC_OWN(__pdesc, __val) \
62 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) 86 SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
63 87
64#define GET_TX_DESC_PKT_SIZE(__pdesc) \ 88#define GET_TX_DESC_PKT_SIZE(__pdesc) \
65 LE_BITS_TO_4BYTE(__pdesc, 0, 16) 89 SHIFT_AND_MASK_LE(__pdesc, 0, 16)
66#define GET_TX_DESC_OFFSET(__pdesc) \ 90#define GET_TX_DESC_OFFSET(__pdesc) \
67 LE_BITS_TO_4BYTE(__pdesc, 16, 8) 91 SHIFT_AND_MASK_LE(__pdesc, 16, 8)
68#define GET_TX_DESC_BMC(__pdesc) \ 92#define GET_TX_DESC_BMC(__pdesc) \
69 LE_BITS_TO_4BYTE(__pdesc, 24, 1) 93 SHIFT_AND_MASK_LE(__pdesc, 24, 1)
70#define GET_TX_DESC_HTC(__pdesc) \ 94#define GET_TX_DESC_HTC(__pdesc) \
71 LE_BITS_TO_4BYTE(__pdesc, 25, 1) 95 SHIFT_AND_MASK_LE(__pdesc, 25, 1)
72#define GET_TX_DESC_LAST_SEG(__pdesc) \ 96#define GET_TX_DESC_LAST_SEG(__pdesc) \
73 LE_BITS_TO_4BYTE(__pdesc, 26, 1) 97 SHIFT_AND_MASK_LE(__pdesc, 26, 1)
74#define GET_TX_DESC_FIRST_SEG(__pdesc) \ 98#define GET_TX_DESC_FIRST_SEG(__pdesc) \
75 LE_BITS_TO_4BYTE(__pdesc, 27, 1) 99 SHIFT_AND_MASK_LE(__pdesc, 27, 1)
76#define GET_TX_DESC_LINIP(__pdesc) \ 100#define GET_TX_DESC_LINIP(__pdesc) \
77 LE_BITS_TO_4BYTE(__pdesc, 28, 1) 101 SHIFT_AND_MASK_LE(__pdesc, 28, 1)
78#define GET_TX_DESC_NO_ACM(__pdesc) \ 102#define GET_TX_DESC_NO_ACM(__pdesc) \
79 LE_BITS_TO_4BYTE(__pdesc, 29, 1) 103 SHIFT_AND_MASK_LE(__pdesc, 29, 1)
80#define GET_TX_DESC_GF(__pdesc) \ 104#define GET_TX_DESC_GF(__pdesc) \
81 LE_BITS_TO_4BYTE(__pdesc, 30, 1) 105 SHIFT_AND_MASK_LE(__pdesc, 30, 1)
82#define GET_TX_DESC_OWN(__pdesc) \ 106#define GET_TX_DESC_OWN(__pdesc) \
83 LE_BITS_TO_4BYTE(__pdesc, 31, 1) 107 SHIFT_AND_MASK_LE(__pdesc, 31, 1)
84 108
85#define SET_TX_DESC_MACID(__pdesc, __val) \ 109#define SET_TX_DESC_MACID(__pdesc, __val) \
86 SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val) 110 SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
87#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ 111#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
88 SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val) 112 SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
89#define SET_TX_DESC_BK(__pdesc, __val) \ 113#define SET_TX_DESC_BK(__pdesc, __val) \
90 SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val) 114 SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
91#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ 115#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
92 SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val) 116 SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
93#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ 117#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
94 SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) 118 SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
95#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ 119#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
96 SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) 120 SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
97#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ 121#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
98 SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) 122 SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
99#define SET_TX_DESC_PIFS(__pdesc, __val) \ 123#define SET_TX_DESC_PIFS(__pdesc, __val) \
100 SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) 124 SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
101#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ 125#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
102 SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val) 126 SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
103#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ 127#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
104 SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val) 128 SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
105#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ 129#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
106 SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) 130 SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
107#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ 131#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
108 SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) 132 SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
109#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ 133#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
110 SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val) 134 SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
111 135
112#define GET_TX_DESC_MACID(__pdesc) \ 136#define GET_TX_DESC_MACID(__pdesc) \
113 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) 137 SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
114#define GET_TX_DESC_AGG_ENABLE(__pdesc) \ 138#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
115 LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) 139 SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
116#define GET_TX_DESC_AGG_BREAK(__pdesc) \ 140#define GET_TX_DESC_AGG_BREAK(__pdesc) \
117 LE_BITS_TO_4BYTE(__pdesc+4, 6, 1) 141 SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
118#define GET_TX_DESC_RDG_ENABLE(__pdesc) \ 142#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
119 LE_BITS_TO_4BYTE(__pdesc+4, 7, 1) 143 SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
120#define GET_TX_DESC_QUEUE_SEL(__pdesc) \ 144#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
121 LE_BITS_TO_4BYTE(__pdesc+4, 8, 5) 145 SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
122#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ 146#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
123 LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) 147 SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
124#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ 148#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
125 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) 149 SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
126#define GET_TX_DESC_PIFS(__pdesc) \ 150#define GET_TX_DESC_PIFS(__pdesc) \
127 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) 151 SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
128#define GET_TX_DESC_RATE_ID(__pdesc) \ 152#define GET_TX_DESC_RATE_ID(__pdesc) \
129 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) 153 SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
130#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ 154#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
131 LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) 155 SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
132#define GET_TX_DESC_EN_DESC_ID(__pdesc) \ 156#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
133 LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) 157 SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
134#define GET_TX_DESC_SEC_TYPE(__pdesc) \ 158#define GET_TX_DESC_SEC_TYPE(__pdesc) \
135 LE_BITS_TO_4BYTE(__pdesc+4, 22, 2) 159 SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
136#define GET_TX_DESC_PKT_OFFSET(__pdesc) \ 160#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
137 LE_BITS_TO_4BYTE(__pdesc+4, 24, 8) 161 SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
138 162
139#define SET_TX_DESC_RTS_RC(__pdesc, __val) \ 163#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
140 SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val) 164 SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
141#define SET_TX_DESC_DATA_RC(__pdesc, __val) \ 165#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
142 SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val) 166 SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
143#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ 167#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
144 SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) 168 SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
145#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ 169#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
146 SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) 170 SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
147#define SET_TX_DESC_RAW(__pdesc, __val) \ 171#define SET_TX_DESC_RAW(__pdesc, __val) \
148 SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) 172 SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
149#define SET_TX_DESC_CCX(__pdesc, __val) \ 173#define SET_TX_DESC_CCX(__pdesc, __val) \
150 SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) 174 SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
151#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ 175#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
152 SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) 176 SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
153#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \ 177#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
154 SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val) 178 SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
155#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ 179#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
156 SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val) 180 SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
157#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \ 181#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
158 SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val) 182 SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
159#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \ 183#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
160 SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val) 184 SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
161#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \ 185#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
162 SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val) 186 SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
163 187
164#define GET_TX_DESC_RTS_RC(__pdesc) \ 188#define GET_TX_DESC_RTS_RC(__pdesc) \
165 LE_BITS_TO_4BYTE(__pdesc+8, 0, 6) 189 SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
166#define GET_TX_DESC_DATA_RC(__pdesc) \ 190#define GET_TX_DESC_DATA_RC(__pdesc) \
167 LE_BITS_TO_4BYTE(__pdesc+8, 6, 6) 191 SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
168#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \ 192#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
169 LE_BITS_TO_4BYTE(__pdesc+8, 14, 2) 193 SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
170#define GET_TX_DESC_MORE_FRAG(__pdesc) \ 194#define GET_TX_DESC_MORE_FRAG(__pdesc) \
171 LE_BITS_TO_4BYTE(__pdesc+8, 17, 1) 195 SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
172#define GET_TX_DESC_RAW(__pdesc) \ 196#define GET_TX_DESC_RAW(__pdesc) \
173 LE_BITS_TO_4BYTE(__pdesc+8, 18, 1) 197 SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
174#define GET_TX_DESC_CCX(__pdesc) \ 198#define GET_TX_DESC_CCX(__pdesc) \
175 LE_BITS_TO_4BYTE(__pdesc+8, 19, 1) 199 SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
176#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ 200#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
177 LE_BITS_TO_4BYTE(__pdesc+8, 20, 3) 201 SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
178#define GET_TX_DESC_ANTSEL_A(__pdesc) \ 202#define GET_TX_DESC_ANTSEL_A(__pdesc) \
179 LE_BITS_TO_4BYTE(__pdesc+8, 24, 1) 203 SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
180#define GET_TX_DESC_ANTSEL_B(__pdesc) \ 204#define GET_TX_DESC_ANTSEL_B(__pdesc) \
181 LE_BITS_TO_4BYTE(__pdesc+8, 25, 1) 205 SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
182#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ 206#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
183 LE_BITS_TO_4BYTE(__pdesc+8, 26, 2) 207 SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
184#define GET_TX_DESC_TX_ANTL(__pdesc) \ 208#define GET_TX_DESC_TX_ANTL(__pdesc) \
185 LE_BITS_TO_4BYTE(__pdesc+8, 28, 2) 209 SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
186#define GET_TX_DESC_TX_ANT_HT(__pdesc) \ 210#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
187 LE_BITS_TO_4BYTE(__pdesc+8, 30, 2) 211 SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
188 212
189#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \ 213#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
190 SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val) 214 SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
191#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ 215#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
192 SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val) 216 SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
193#define SET_TX_DESC_SEQ(__pdesc, __val) \ 217#define SET_TX_DESC_SEQ(__pdesc, __val) \
194 SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val) 218 SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
195#define SET_TX_DESC_PKT_ID(__pdesc, __val) \ 219#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
196 SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val) 220 SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
197 221
198#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ 222#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
199 LE_BITS_TO_4BYTE(__pdesc+12, 0, 8) 223 SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
200#define GET_TX_DESC_TAIL_PAGE(__pdesc) \ 224#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
201 LE_BITS_TO_4BYTE(__pdesc+12, 8, 8) 225 SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
202#define GET_TX_DESC_SEQ(__pdesc) \ 226#define GET_TX_DESC_SEQ(__pdesc) \
203 LE_BITS_TO_4BYTE(__pdesc+12, 16, 12) 227 SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
204#define GET_TX_DESC_PKT_ID(__pdesc) \ 228#define GET_TX_DESC_PKT_ID(__pdesc) \
205 LE_BITS_TO_4BYTE(__pdesc+12, 28, 4) 229 SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
206 230
207#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ 231#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
208 SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val) 232 SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
209#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \ 233#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
210 SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val) 234 SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
211#define SET_TX_DESC_QOS(__pdesc, __val) \ 235#define SET_TX_DESC_QOS(__pdesc, __val) \
212 SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val) 236 SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
213#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ 237#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
214 SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val) 238 SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
215#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ 239#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
216 SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val) 240 SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
217#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ 241#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
218 SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val) 242 SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
219#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ 243#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
220 SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val) 244 SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
221#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ 245#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
222 SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val) 246 SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
223#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ 247#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
224 SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val) 248 SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
225#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ 249#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
226 SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val) 250 SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
227#define SET_TX_DESC_PORT_ID(__pdesc, __val) \ 251#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
228 SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val) 252 SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
229#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \ 253#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
230 SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val) 254 SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
231#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \ 255#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
232 SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val) 256 SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
233#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ 257#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
234 SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val) 258 SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
235#define SET_TX_DESC_TX_STBC(__pdesc, __val) \ 259#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
236 SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val) 260 SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
237#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \ 261#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
238 SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val) 262 SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
239#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ 263#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
240 SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val) 264 SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
241#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ 265#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
242 SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val) 266 SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
243#define SET_TX_DESC_RTS_BW(__pdesc, __val) \ 267#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
244 SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val) 268 SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
245#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ 269#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
246 SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val) 270 SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
247#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \ 271#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
248 SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val) 272 SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
249 273
250#define GET_TX_DESC_RTS_RATE(__pdesc) \ 274#define GET_TX_DESC_RTS_RATE(__pdesc) \
251 LE_BITS_TO_4BYTE(__pdesc+16, 0, 5) 275 SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
252#define GET_TX_DESC_AP_DCFE(__pdesc) \ 276#define GET_TX_DESC_AP_DCFE(__pdesc) \
253 LE_BITS_TO_4BYTE(__pdesc+16, 5, 1) 277 SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
254#define GET_TX_DESC_QOS(__pdesc) \ 278#define GET_TX_DESC_QOS(__pdesc) \
255 LE_BITS_TO_4BYTE(__pdesc+16, 6, 1) 279 SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
256#define GET_TX_DESC_HWSEQ_EN(__pdesc) \ 280#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
257 LE_BITS_TO_4BYTE(__pdesc+16, 7, 1) 281 SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
258#define GET_TX_DESC_USE_RATE(__pdesc) \ 282#define GET_TX_DESC_USE_RATE(__pdesc) \
259 LE_BITS_TO_4BYTE(__pdesc+16, 8, 1) 283 SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
260#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \ 284#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
261 LE_BITS_TO_4BYTE(__pdesc+16, 9, 1) 285 SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
262#define GET_TX_DESC_DISABLE_FB(__pdesc) \ 286#define GET_TX_DESC_DISABLE_FB(__pdesc) \
263 LE_BITS_TO_4BYTE(__pdesc+16, 10, 1) 287 SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
264#define GET_TX_DESC_CTS2SELF(__pdesc) \ 288#define GET_TX_DESC_CTS2SELF(__pdesc) \
265 LE_BITS_TO_4BYTE(__pdesc+16, 11, 1) 289 SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
266#define GET_TX_DESC_RTS_ENABLE(__pdesc) \ 290#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
267 LE_BITS_TO_4BYTE(__pdesc+16, 12, 1) 291 SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
268#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ 292#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
269 LE_BITS_TO_4BYTE(__pdesc+16, 13, 1) 293 SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
270#define GET_TX_DESC_PORT_ID(__pdesc) \ 294#define GET_TX_DESC_PORT_ID(__pdesc) \
271 LE_BITS_TO_4BYTE(__pdesc+16, 14, 1) 295 SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
272#define GET_TX_DESC_WAIT_DCTS(__pdesc) \ 296#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
273 LE_BITS_TO_4BYTE(__pdesc+16, 18, 1) 297 SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
274#define GET_TX_DESC_CTS2AP_EN(__pdesc) \ 298#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
275 LE_BITS_TO_4BYTE(__pdesc+16, 19, 1) 299 SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
276#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \ 300#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
277 LE_BITS_TO_4BYTE(__pdesc+16, 20, 2) 301 SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
278#define GET_TX_DESC_TX_STBC(__pdesc) \ 302#define GET_TX_DESC_TX_STBC(__pdesc) \
279 LE_BITS_TO_4BYTE(__pdesc+16, 22, 2) 303 SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
280#define GET_TX_DESC_DATA_SHORT(__pdesc) \ 304#define GET_TX_DESC_DATA_SHORT(__pdesc) \
281 LE_BITS_TO_4BYTE(__pdesc+16, 24, 1) 305 SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
282#define GET_TX_DESC_DATA_BW(__pdesc) \ 306#define GET_TX_DESC_DATA_BW(__pdesc) \
283 LE_BITS_TO_4BYTE(__pdesc+16, 25, 1) 307 SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
284#define GET_TX_DESC_RTS_SHORT(__pdesc) \ 308#define GET_TX_DESC_RTS_SHORT(__pdesc) \
285 LE_BITS_TO_4BYTE(__pdesc+16, 26, 1) 309 SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
286#define GET_TX_DESC_RTS_BW(__pdesc) \ 310#define GET_TX_DESC_RTS_BW(__pdesc) \
287 LE_BITS_TO_4BYTE(__pdesc+16, 27, 1) 311 SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
288#define GET_TX_DESC_RTS_SC(__pdesc) \ 312#define GET_TX_DESC_RTS_SC(__pdesc) \
289 LE_BITS_TO_4BYTE(__pdesc+16, 28, 2) 313 SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
290#define GET_TX_DESC_RTS_STBC(__pdesc) \ 314#define GET_TX_DESC_RTS_STBC(__pdesc) \
291 LE_BITS_TO_4BYTE(__pdesc+16, 30, 2) 315 SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
292 316
293#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ 317#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
294 SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val) 318 SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
295#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ 319#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
296 SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val) 320 SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
297#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ 321#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
298 SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) 322 SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
299#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ 323#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
300 SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val) 324 SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
301#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ 325#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
302 SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) 326 SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
303#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ 327#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
304 SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val) 328 SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
305#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ 329#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
306 SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val) 330 SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
307#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ 331#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
308 SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val) 332 SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
309 333
310#define GET_TX_DESC_TX_RATE(__pdesc) \ 334#define GET_TX_DESC_TX_RATE(__pdesc) \
311 LE_BITS_TO_4BYTE(__pdesc+20, 0, 6) 335 SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
312#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ 336#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
313 LE_BITS_TO_4BYTE(__pdesc+20, 6, 1) 337 SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
314#define GET_TX_DESC_CCX_TAG(__pdesc) \ 338#define GET_TX_DESC_CCX_TAG(__pdesc) \
315 LE_BITS_TO_4BYTE(__pdesc+20, 7, 1) 339 SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
316#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ 340#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
317 LE_BITS_TO_4BYTE(__pdesc+20, 8, 5) 341 SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
318#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \ 342#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
319 LE_BITS_TO_4BYTE(__pdesc+20, 13, 4) 343 SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
320#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ 344#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
321 LE_BITS_TO_4BYTE(__pdesc+20, 17, 1) 345 SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
322#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \ 346#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
323 LE_BITS_TO_4BYTE(__pdesc+20, 18, 6) 347 SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
324#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \ 348#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
325 LE_BITS_TO_4BYTE(__pdesc+20, 24, 8) 349 SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
326 350
327#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \ 351#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
328 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val) 352 SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
329#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \ 353#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
330 SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val) 354 SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
331#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ 355#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
332 SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val) 356 SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
333#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ 357#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
334 SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val) 358 SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
335#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \ 359#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
336 SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val) 360 SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
337#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \ 361#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
338 SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val) 362 SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
339#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \ 363#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
340 SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val) 364 SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
341#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \ 365#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
342 SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val) 366 SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
343 367
344#define GET_TX_DESC_TXAGC_A(__pdesc) \ 368#define GET_TX_DESC_TXAGC_A(__pdesc) \
345 LE_BITS_TO_4BYTE(__pdesc+24, 0, 5) 369 SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
346#define GET_TX_DESC_TXAGC_B(__pdesc) \ 370#define GET_TX_DESC_TXAGC_B(__pdesc) \
347 LE_BITS_TO_4BYTE(__pdesc+24, 5, 5) 371 SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
348#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ 372#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
349 LE_BITS_TO_4BYTE(__pdesc+24, 10, 1) 373 SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
350#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ 374#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
351 LE_BITS_TO_4BYTE(__pdesc+24, 11, 5) 375 SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
352#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ 376#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
353 LE_BITS_TO_4BYTE(__pdesc+24, 16, 4) 377 SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
354#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ 378#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
355 LE_BITS_TO_4BYTE(__pdesc+24, 20, 4) 379 SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
356#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ 380#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
357 LE_BITS_TO_4BYTE(__pdesc+24, 24, 4) 381 SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
358#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \ 382#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
359 LE_BITS_TO_4BYTE(__pdesc+24, 28, 4) 383 SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
360 384
361#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ 385#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
362 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) 386 SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
363#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \ 387#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
364 SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val) 388 SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
365#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \ 389#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
366 SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val) 390 SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
367#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \ 391#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
368 SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val) 392 SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
369#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \ 393#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
370 SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val) 394 SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
371 395
372#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ 396#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
373 LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) 397 SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
374#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \ 398#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
375 LE_BITS_TO_4BYTE(__pdesc+28, 16, 4) 399 SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
376#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \ 400#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
377 LE_BITS_TO_4BYTE(__pdesc+28, 20, 4) 401 SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
378#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \ 402#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
379 LE_BITS_TO_4BYTE(__pdesc+28, 24, 4) 403 SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
380#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \ 404#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
381 LE_BITS_TO_4BYTE(__pdesc+28, 28, 4) 405 SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
382 406
383#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ 407#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
384 SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val) 408 SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
385#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \ 409#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
386 SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val) 410 SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
387 411
388#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ 412#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
389 LE_BITS_TO_4BYTE(__pdesc+32, 0, 32) 413 SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
390#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ 414#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
391 LE_BITS_TO_4BYTE(__pdesc+36, 0, 32) 415 SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
392 416
393#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ 417#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
394 SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) 418 SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
395#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \ 419#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
396 SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val) 420 SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
397 421
398#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ 422#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
399 LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) 423 SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
400#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \ 424#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
401 LE_BITS_TO_4BYTE(__pdesc+44, 0, 32) 425 SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
402 426
403#define GET_RX_DESC_PKT_LEN(__pdesc) \ 427#define GET_RX_DESC_PKT_LEN(__pdesc) \
404 LE_BITS_TO_4BYTE(__pdesc, 0, 14) 428 SHIFT_AND_MASK_LE(__pdesc, 0, 14)
405#define GET_RX_DESC_CRC32(__pdesc) \ 429#define GET_RX_DESC_CRC32(__pdesc) \
406 LE_BITS_TO_4BYTE(__pdesc, 14, 1) 430 SHIFT_AND_MASK_LE(__pdesc, 14, 1)
407#define GET_RX_DESC_ICV(__pdesc) \ 431#define GET_RX_DESC_ICV(__pdesc) \
408 LE_BITS_TO_4BYTE(__pdesc, 15, 1) 432 SHIFT_AND_MASK_LE(__pdesc, 15, 1)
409#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ 433#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
410 LE_BITS_TO_4BYTE(__pdesc, 16, 4) 434 SHIFT_AND_MASK_LE(__pdesc, 16, 4)
411#define GET_RX_DESC_SECURITY(__pdesc) \ 435#define GET_RX_DESC_SECURITY(__pdesc) \
412 LE_BITS_TO_4BYTE(__pdesc, 20, 3) 436 SHIFT_AND_MASK_LE(__pdesc, 20, 3)
413#define GET_RX_DESC_QOS(__pdesc) \ 437#define GET_RX_DESC_QOS(__pdesc) \
414 LE_BITS_TO_4BYTE(__pdesc, 23, 1) 438 SHIFT_AND_MASK_LE(__pdesc, 23, 1)
415#define GET_RX_DESC_SHIFT(__pdesc) \ 439#define GET_RX_DESC_SHIFT(__pdesc) \
416 LE_BITS_TO_4BYTE(__pdesc, 24, 2) 440 SHIFT_AND_MASK_LE(__pdesc, 24, 2)
417#define GET_RX_DESC_PHYST(__pdesc) \ 441#define GET_RX_DESC_PHYST(__pdesc) \
418 LE_BITS_TO_4BYTE(__pdesc, 26, 1) 442 SHIFT_AND_MASK_LE(__pdesc, 26, 1)
419#define GET_RX_DESC_SWDEC(__pdesc) \ 443#define GET_RX_DESC_SWDEC(__pdesc) \
420 LE_BITS_TO_4BYTE(__pdesc, 27, 1) 444 SHIFT_AND_MASK_LE(__pdesc, 27, 1)
421#define GET_RX_DESC_LS(__pdesc) \ 445#define GET_RX_DESC_LS(__pdesc) \
422 LE_BITS_TO_4BYTE(__pdesc, 28, 1) 446 SHIFT_AND_MASK_LE(__pdesc, 28, 1)
423#define GET_RX_DESC_FS(__pdesc) \ 447#define GET_RX_DESC_FS(__pdesc) \
424 LE_BITS_TO_4BYTE(__pdesc, 29, 1) 448 SHIFT_AND_MASK_LE(__pdesc, 29, 1)
425#define GET_RX_DESC_EOR(__pdesc) \ 449#define GET_RX_DESC_EOR(__pdesc) \
426 LE_BITS_TO_4BYTE(__pdesc, 30, 1) 450 SHIFT_AND_MASK_LE(__pdesc, 30, 1)
427#define GET_RX_DESC_OWN(__pdesc) \ 451#define GET_RX_DESC_OWN(__pdesc) \
428 LE_BITS_TO_4BYTE(__pdesc, 31, 1) 452 SHIFT_AND_MASK_LE(__pdesc, 31, 1)
429 453
430#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ 454#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
431 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) 455 SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
432#define SET_RX_DESC_EOR(__pdesc, __val) \ 456#define SET_RX_DESC_EOR(__pdesc, __val) \
433 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) 457 SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
434#define SET_RX_DESC_OWN(__pdesc, __val) \ 458#define SET_RX_DESC_OWN(__pdesc, __val) \
435 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) 459 SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
436 460
437#define GET_RX_DESC_MACID(__pdesc) \ 461#define GET_RX_DESC_MACID(__pdesc) \
438 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) 462 SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
439#define GET_RX_DESC_TID(__pdesc) \ 463#define GET_RX_DESC_TID(__pdesc) \
440 LE_BITS_TO_4BYTE(__pdesc+4, 5, 4) 464 SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
441#define GET_RX_DESC_HWRSVD(__pdesc) \ 465#define GET_RX_DESC_HWRSVD(__pdesc) \
442 LE_BITS_TO_4BYTE(__pdesc+4, 9, 5) 466 SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
443#define GET_RX_DESC_PAGGR(__pdesc) \ 467#define GET_RX_DESC_PAGGR(__pdesc) \
444 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) 468 SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
445#define GET_RX_DESC_FAGGR(__pdesc) \ 469#define GET_RX_DESC_FAGGR(__pdesc) \
446 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) 470 SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
447#define GET_RX_DESC_A1_FIT(__pdesc) \ 471#define GET_RX_DESC_A1_FIT(__pdesc) \
448 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) 472 SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
449#define GET_RX_DESC_A2_FIT(__pdesc) \ 473#define GET_RX_DESC_A2_FIT(__pdesc) \
450 LE_BITS_TO_4BYTE(__pdesc+4, 20, 4) 474 SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
451#define GET_RX_DESC_PAM(__pdesc) \ 475#define GET_RX_DESC_PAM(__pdesc) \
452 LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) 476 SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
453#define GET_RX_DESC_PWR(__pdesc) \ 477#define GET_RX_DESC_PWR(__pdesc) \
454 LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) 478 SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
455#define GET_RX_DESC_MD(__pdesc) \ 479#define GET_RX_DESC_MD(__pdesc) \
456 LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) 480 SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
457#define GET_RX_DESC_MF(__pdesc) \ 481#define GET_RX_DESC_MF(__pdesc) \
458 LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) 482 SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
459#define GET_RX_DESC_TYPE(__pdesc) \ 483#define GET_RX_DESC_TYPE(__pdesc) \
460 LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) 484 SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
461#define GET_RX_DESC_MC(__pdesc) \ 485#define GET_RX_DESC_MC(__pdesc) \
462 LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) 486 SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
463#define GET_RX_DESC_BC(__pdesc) \ 487#define GET_RX_DESC_BC(__pdesc) \
464 LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) 488 SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
465#define GET_RX_DESC_SEQ(__pdesc) \ 489#define GET_RX_DESC_SEQ(__pdesc) \
466 LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) 490 SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
467#define GET_RX_DESC_FRAG(__pdesc) \ 491#define GET_RX_DESC_FRAG(__pdesc) \
468 LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) 492 SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
469#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \ 493#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
470 LE_BITS_TO_4BYTE(__pdesc+8, 16, 14) 494 SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
471#define GET_RX_DESC_NEXT_IND(__pdesc) \ 495#define GET_RX_DESC_NEXT_IND(__pdesc) \
472 LE_BITS_TO_4BYTE(__pdesc+8, 30, 1) 496 SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
473#define GET_RX_DESC_RSVD(__pdesc) \ 497#define GET_RX_DESC_RSVD(__pdesc) \
474 LE_BITS_TO_4BYTE(__pdesc+8, 31, 1) 498 SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
475 499
476#define GET_RX_DESC_RXMCS(__pdesc) \ 500#define GET_RX_DESC_RXMCS(__pdesc) \
477 LE_BITS_TO_4BYTE(__pdesc+12, 0, 6) 501 SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
478#define GET_RX_DESC_RXHT(__pdesc) \ 502#define GET_RX_DESC_RXHT(__pdesc) \
479 LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) 503 SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
480#define GET_RX_DESC_SPLCP(__pdesc) \ 504#define GET_RX_DESC_SPLCP(__pdesc) \
481 LE_BITS_TO_4BYTE(__pdesc+12, 8, 1) 505 SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
482#define GET_RX_DESC_BW(__pdesc) \ 506#define GET_RX_DESC_BW(__pdesc) \
483 LE_BITS_TO_4BYTE(__pdesc+12, 9, 1) 507 SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
484#define GET_RX_DESC_HTC(__pdesc) \ 508#define GET_RX_DESC_HTC(__pdesc) \
485 LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) 509 SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
486#define GET_RX_DESC_HWPC_ERR(__pdesc) \ 510#define GET_RX_DESC_HWPC_ERR(__pdesc) \
487 LE_BITS_TO_4BYTE(__pdesc+12, 14, 1) 511 SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
488#define GET_RX_DESC_HWPC_IND(__pdesc) \ 512#define GET_RX_DESC_HWPC_IND(__pdesc) \
489 LE_BITS_TO_4BYTE(__pdesc+12, 15, 1) 513 SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
490#define GET_RX_DESC_IV0(__pdesc) \ 514#define GET_RX_DESC_IV0(__pdesc) \
491 LE_BITS_TO_4BYTE(__pdesc+12, 16, 16) 515 SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
492 516
493#define GET_RX_DESC_IV1(__pdesc) \ 517#define GET_RX_DESC_IV1(__pdesc) \
494 LE_BITS_TO_4BYTE(__pdesc+16, 0, 32) 518 SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
495#define GET_RX_DESC_TSFL(__pdesc) \ 519#define GET_RX_DESC_TSFL(__pdesc) \
496 LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) 520 SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
497 521
498#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ 522#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
499 LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) 523 SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
500#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ 524#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
501 LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) 525 SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
502 526
503#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ 527#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
504 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) 528 SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
505#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ 529#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
506 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) 530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
507 531
508#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
509do { \ 533do { \
@@ -711,4 +735,6 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue);
711void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, 735void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
712 bool b_firstseg, bool b_lastseg, 736 bool b_firstseg, bool b_lastseg,
713 struct sk_buff *skb); 737 struct sk_buff *skb);
738bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
739
714#endif 740#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
new file mode 100644
index 00000000000..91c65122ca8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
@@ -0,0 +1,15 @@
1rtl8192cu-objs := \
2 dm.o \
3 fw.o \
4 hw.o \
5 led.o \
6 mac.o \
7 phy.o \
8 rf.o \
9 sw.o \
10 table.o \
11 trx.o
12
13obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
14
15ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
new file mode 100644
index 00000000000..c54940ea72f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -0,0 +1,62 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/def.h"
31
32/*-------------------------------------------------------------------------
33 * Chip specific
34 *-------------------------------------------------------------------------*/
35#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
36#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
37#define NORMAL_CHIP BIT(4)
38#define CHIP_VENDOR_UMC BIT(5)
39#define CHIP_VENDOR_UMC_B_CUT BIT(6)
40
41#define IS_NORMAL_CHIP(version) \
42 (((version) & NORMAL_CHIP) ? true : false)
43
44#define IS_8723_SERIES(version) \
45 (((version) & CHIP_8723) ? true : false)
46
47#define IS_92C_1T2R(version) \
48 (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
49
50#define IS_VENDOR_UMC(version) \
51 (((version) & CHIP_VENDOR_UMC) ? true : false)
52
53#define IS_VENDOR_UMC_A_CUT(version) \
54 (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \
55 false : true) : false)
56
57#define IS_VENDOR_8723_A_CUT(version) \
58 (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
59 false : true) : false)
60
61#define CHIP_BONDING_92C_1T2R 0x1
62#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
new file mode 100644
index 00000000000..a4649a2f7e6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -0,0 +1,116 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../base.h"
32#include "reg.h"
33#include "def.h"
34#include "phy.h"
35#include "dm.h"
36#include "fw.h"
37
38#include "../rtl8192c/dm_common.c"
39
40void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
41{
42 struct rtl_priv *rtlpriv = rtl_priv(hw);
43 struct rtl_phy *rtlphy = &(rtlpriv->phy);
44 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
45 long undecorated_smoothed_pwdb;
46
47 if (!rtlpriv->dm.dynamic_txpower_enable)
48 return;
49
50 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
51 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
52 return;
53 }
54
55 if ((mac->link_state < MAC80211_LINKED) &&
56 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
57 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
58 ("Not connected to any\n"));
59
60 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
61
62 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
63 return;
64 }
65
66 if (mac->link_state >= MAC80211_LINKED) {
67 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
68 undecorated_smoothed_pwdb =
69 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
70 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
71 ("AP Client PWDB = 0x%lx\n",
72 undecorated_smoothed_pwdb));
73 } else {
74 undecorated_smoothed_pwdb =
75 rtlpriv->dm.undecorated_smoothed_pwdb;
76 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
77 ("STA Default Port PWDB = 0x%lx\n",
78 undecorated_smoothed_pwdb));
79 }
80 } else {
81 undecorated_smoothed_pwdb =
82 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
83
84 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
85 ("AP Ext Port PWDB = 0x%lx\n",
86 undecorated_smoothed_pwdb));
87 }
88
89 if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
90 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
91 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
92 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
93 } else if ((undecorated_smoothed_pwdb <
94 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
95 (undecorated_smoothed_pwdb >=
96 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
97
98 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
99 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
100 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
101 } else if (undecorated_smoothed_pwdb <
102 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
103 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
104 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
105 ("TXHIGHPWRLEVEL_NORMAL\n"));
106 }
107
108 if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
109 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
110 ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
111 rtlphy->current_channel));
112 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
113 }
114
115 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
116}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
new file mode 100644
index 00000000000..5e7fbfc2851
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
@@ -0,0 +1,32 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/dm.h"
31
32void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/fw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/fw.c
new file mode 100644
index 00000000000..8e350eea342
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/fw.c
@@ -0,0 +1,30 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/fw.c"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/fw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/fw.h
new file mode 100644
index 00000000000..a3bbac811d0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/fw.h
@@ -0,0 +1,30 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/fw.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
new file mode 100644
index 00000000000..df8fe3b51c9
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -0,0 +1,2505 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../efuse.h"
32#include "../base.h"
33#include "../cam.h"
34#include "../ps.h"
35#include "../usb.h"
36#include "reg.h"
37#include "def.h"
38#include "phy.h"
39#include "mac.h"
40#include "dm.h"
41#include "fw.h"
42#include "hw.h"
43#include "trx.h"
44#include "led.h"
45#include "table.h"
46
47static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
48{
49 struct rtl_priv *rtlpriv = rtl_priv(hw);
50 struct rtl_phy *rtlphy = &(rtlpriv->phy);
51 struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
52
53 rtlphy->hwparam_tables[MAC_REG].length = RTL8192CUMAC_2T_ARRAYLENGTH;
54 rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY;
55 if (IS_HIGHT_PA(rtlefuse->board_type)) {
56 rtlphy->hwparam_tables[PHY_REG_PG].length =
57 RTL8192CUPHY_REG_Array_PG_HPLength;
58 rtlphy->hwparam_tables[PHY_REG_PG].pdata =
59 RTL8192CUPHY_REG_Array_PG_HP;
60 } else {
61 rtlphy->hwparam_tables[PHY_REG_PG].length =
62 RTL8192CUPHY_REG_ARRAY_PGLENGTH;
63 rtlphy->hwparam_tables[PHY_REG_PG].pdata =
64 RTL8192CUPHY_REG_ARRAY_PG;
65 }
66 /* 2T */
67 rtlphy->hwparam_tables[PHY_REG_2T].length =
68 RTL8192CUPHY_REG_2TARRAY_LENGTH;
69 rtlphy->hwparam_tables[PHY_REG_2T].pdata =
70 RTL8192CUPHY_REG_2TARRAY;
71 rtlphy->hwparam_tables[RADIOA_2T].length =
72 RTL8192CURADIOA_2TARRAYLENGTH;
73 rtlphy->hwparam_tables[RADIOA_2T].pdata =
74 RTL8192CURADIOA_2TARRAY;
75 rtlphy->hwparam_tables[RADIOB_2T].length =
76 RTL8192CURADIOB_2TARRAYLENGTH;
77 rtlphy->hwparam_tables[RADIOB_2T].pdata =
78 RTL8192CU_RADIOB_2TARRAY;
79 rtlphy->hwparam_tables[AGCTAB_2T].length =
80 RTL8192CUAGCTAB_2TARRAYLENGTH;
81 rtlphy->hwparam_tables[AGCTAB_2T].pdata =
82 RTL8192CUAGCTAB_2TARRAY;
83 /* 1T */
84 if (IS_HIGHT_PA(rtlefuse->board_type)) {
85 rtlphy->hwparam_tables[PHY_REG_1T].length =
86 RTL8192CUPHY_REG_1T_HPArrayLength;
87 rtlphy->hwparam_tables[PHY_REG_1T].pdata =
88 RTL8192CUPHY_REG_1T_HPArray;
89 rtlphy->hwparam_tables[RADIOA_1T].length =
90 RTL8192CURadioA_1T_HPArrayLength;
91 rtlphy->hwparam_tables[RADIOA_1T].pdata =
92 RTL8192CURadioA_1T_HPArray;
93 rtlphy->hwparam_tables[RADIOB_1T].length =
94 RTL8192CURADIOB_1TARRAYLENGTH;
95 rtlphy->hwparam_tables[RADIOB_1T].pdata =
96 RTL8192CU_RADIOB_1TARRAY;
97 rtlphy->hwparam_tables[AGCTAB_1T].length =
98 RTL8192CUAGCTAB_1T_HPArrayLength;
99 rtlphy->hwparam_tables[AGCTAB_1T].pdata =
100 Rtl8192CUAGCTAB_1T_HPArray;
101 } else {
102 rtlphy->hwparam_tables[PHY_REG_1T].length =
103 RTL8192CUPHY_REG_1TARRAY_LENGTH;
104 rtlphy->hwparam_tables[PHY_REG_1T].pdata =
105 RTL8192CUPHY_REG_1TARRAY;
106 rtlphy->hwparam_tables[RADIOA_1T].length =
107 RTL8192CURADIOA_1TARRAYLENGTH;
108 rtlphy->hwparam_tables[RADIOA_1T].pdata =
109 RTL8192CU_RADIOA_1TARRAY;
110 rtlphy->hwparam_tables[RADIOB_1T].length =
111 RTL8192CURADIOB_1TARRAYLENGTH;
112 rtlphy->hwparam_tables[RADIOB_1T].pdata =
113 RTL8192CU_RADIOB_1TARRAY;
114 rtlphy->hwparam_tables[AGCTAB_1T].length =
115 RTL8192CUAGCTAB_1TARRAYLENGTH;
116 rtlphy->hwparam_tables[AGCTAB_1T].pdata =
117 RTL8192CUAGCTAB_1TARRAY;
118 }
119}
120
121static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
122 bool autoload_fail,
123 u8 *hwinfo)
124{
125 struct rtl_priv *rtlpriv = rtl_priv(hw);
126 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
127 u8 rf_path, index, tempval;
128 u16 i;
129
130 for (rf_path = 0; rf_path < 2; rf_path++) {
131 for (i = 0; i < 3; i++) {
132 if (!autoload_fail) {
133 rtlefuse->
134 eeprom_chnlarea_txpwr_cck[rf_path][i] =
135 hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
136 rtlefuse->
137 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
138 hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
139 i];
140 } else {
141 rtlefuse->
142 eeprom_chnlarea_txpwr_cck[rf_path][i] =
143 EEPROM_DEFAULT_TXPOWERLEVEL;
144 rtlefuse->
145 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
146 EEPROM_DEFAULT_TXPOWERLEVEL;
147 }
148 }
149 }
150 for (i = 0; i < 3; i++) {
151 if (!autoload_fail)
152 tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
153 else
154 tempval = EEPROM_DEFAULT_HT40_2SDIFF;
155 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
156 (tempval & 0xf);
157 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
158 ((tempval & 0xf0) >> 4);
159 }
160 for (rf_path = 0; rf_path < 2; rf_path++)
161 for (i = 0; i < 3; i++)
162 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
163 ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
164 i, rtlefuse->
165 eeprom_chnlarea_txpwr_cck[rf_path][i]));
166 for (rf_path = 0; rf_path < 2; rf_path++)
167 for (i = 0; i < 3; i++)
168 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
169 ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
170 rf_path, i,
171 rtlefuse->
172 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
173 for (rf_path = 0; rf_path < 2; rf_path++)
174 for (i = 0; i < 3; i++)
175 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
176 ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
177 rf_path, i,
178 rtlefuse->
179 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
180 [i]));
181 for (rf_path = 0; rf_path < 2; rf_path++) {
182 for (i = 0; i < 14; i++) {
183 index = _rtl92c_get_chnl_group((u8) i);
184 rtlefuse->txpwrlevel_cck[rf_path][i] =
185 rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
186 rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
187 rtlefuse->
188 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
189 if ((rtlefuse->
190 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
191 rtlefuse->
192 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
193 > 0) {
194 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
195 rtlefuse->
196 eeprom_chnlarea_txpwr_ht40_1s[rf_path]
197 [index] - rtlefuse->
198 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
199 [index];
200 } else {
201 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
202 }
203 }
204 for (i = 0; i < 14; i++) {
205 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
206 ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
207 "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
208 rtlefuse->txpwrlevel_cck[rf_path][i],
209 rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
210 rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
211 }
212 }
213 for (i = 0; i < 3; i++) {
214 if (!autoload_fail) {
215 rtlefuse->eeprom_pwrlimit_ht40[i] =
216 hwinfo[EEPROM_TXPWR_GROUP + i];
217 rtlefuse->eeprom_pwrlimit_ht20[i] =
218 hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
219 } else {
220 rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
221 rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
222 }
223 }
224 for (rf_path = 0; rf_path < 2; rf_path++) {
225 for (i = 0; i < 14; i++) {
226 index = _rtl92c_get_chnl_group((u8) i);
227 if (rf_path == RF90_PATH_A) {
228 rtlefuse->pwrgroup_ht20[rf_path][i] =
229 (rtlefuse->eeprom_pwrlimit_ht20[index]
230 & 0xf);
231 rtlefuse->pwrgroup_ht40[rf_path][i] =
232 (rtlefuse->eeprom_pwrlimit_ht40[index]
233 & 0xf);
234 } else if (rf_path == RF90_PATH_B) {
235 rtlefuse->pwrgroup_ht20[rf_path][i] =
236 ((rtlefuse->eeprom_pwrlimit_ht20[index]
237 & 0xf0) >> 4);
238 rtlefuse->pwrgroup_ht40[rf_path][i] =
239 ((rtlefuse->eeprom_pwrlimit_ht40[index]
240 & 0xf0) >> 4);
241 }
242 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
243 ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
244 rf_path, i,
245 rtlefuse->pwrgroup_ht20[rf_path][i]));
246 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
247 ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
248 rf_path, i,
249 rtlefuse->pwrgroup_ht40[rf_path][i]));
250 }
251 }
252 for (i = 0; i < 14; i++) {
253 index = _rtl92c_get_chnl_group((u8) i);
254 if (!autoload_fail)
255 tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
256 else
257 tempval = EEPROM_DEFAULT_HT20_DIFF;
258 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
259 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
260 ((tempval >> 4) & 0xF);
261 if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
262 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
263 if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
264 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
265 index = _rtl92c_get_chnl_group((u8) i);
266 if (!autoload_fail)
267 tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
268 else
269 tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
270 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
271 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
272 ((tempval >> 4) & 0xF);
273 }
274 rtlefuse->legacy_ht_txpowerdiff =
275 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
276 for (i = 0; i < 14; i++)
277 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
278 ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
279 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
280 for (i = 0; i < 14; i++)
281 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
282 ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
283 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
284 for (i = 0; i < 14; i++)
285 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
286 ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
287 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
288 for (i = 0; i < 14; i++)
289 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
290 ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
291 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
292 if (!autoload_fail)
293 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
294 else
295 rtlefuse->eeprom_regulatory = 0;
296 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
297 ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
298 if (!autoload_fail) {
299 rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
300 rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
301 } else {
302 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
303 rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
304 }
305 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
306 ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
307 rtlefuse->eeprom_tssi[RF90_PATH_A],
308 rtlefuse->eeprom_tssi[RF90_PATH_B]));
309 if (!autoload_fail)
310 tempval = hwinfo[EEPROM_THERMAL_METER];
311 else
312 tempval = EEPROM_DEFAULT_THERMALMETER;
313 rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
314 if (rtlefuse->eeprom_thermalmeter < 0x06 ||
315 rtlefuse->eeprom_thermalmeter > 0x1c)
316 rtlefuse->eeprom_thermalmeter = 0x12;
317 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
318 rtlefuse->apk_thermalmeterignore = true;
319 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
320 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
321 ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
322}
323
324static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
325{
326 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
327 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
328 u8 boardType;
329
330 if (IS_NORMAL_CHIP(rtlhal->version)) {
331 boardType = ((contents[EEPROM_RF_OPT1]) &
332 BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/
333 } else {
334 boardType = contents[EEPROM_RF_OPT4];
335 boardType &= BOARD_TYPE_TEST_MASK;
336 }
337 rtlefuse->board_type = boardType;
338 if (IS_HIGHT_PA(rtlefuse->board_type))
339 rtlefuse->external_pa = 1;
340 printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type);
341
342#ifdef CONFIG_ANTENNA_DIVERSITY
343 /* Antenna Diversity setting. */
344 if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */
345 rtl_efuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3;
346 else
347 rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */
348
349 printk(KERN_INFO "rtl8192cu: Antenna Config %x\n",
350 rtl_efuse->antenna_cfg);
351#endif
352}
353
354#ifdef CONFIG_BT_COEXIST
355static void _update_bt_param(_adapter *padapter)
356{
357 struct btcoexist_priv *pbtpriv = &(padapter->halpriv.bt_coexist);
358 struct registry_priv *registry_par = &padapter->registrypriv;
359 if (2 != registry_par->bt_iso) {
360 /* 0:Low, 1:High, 2:From Efuse */
361 pbtpriv->BT_Ant_isolation = registry_par->bt_iso;
362 }
363 if (registry_par->bt_sco == 1) {
 364		/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4:Busy,
 365		 * 5:OtherBusy */
366 pbtpriv->BT_Service = BT_OtherAction;
367 } else if (registry_par->bt_sco == 2) {
368 pbtpriv->BT_Service = BT_SCO;
369 } else if (registry_par->bt_sco == 4) {
370 pbtpriv->BT_Service = BT_Busy;
371 } else if (registry_par->bt_sco == 5) {
372 pbtpriv->BT_Service = BT_OtherBusy;
373 } else {
374 pbtpriv->BT_Service = BT_Idle;
375 }
376 pbtpriv->BT_Ampdu = registry_par->bt_ampdu;
377 pbtpriv->bCOBT = _TRUE;
378 pbtpriv->BtEdcaUL = 0;
379 pbtpriv->BtEdcaDL = 0;
380 pbtpriv->BtRssiState = 0xff;
381 pbtpriv->bInitSet = _FALSE;
382 pbtpriv->bBTBusyTraffic = _FALSE;
383 pbtpriv->bBTTrafficModeSet = _FALSE;
384 pbtpriv->bBTNonTrafficModeSet = _FALSE;
385 pbtpriv->CurrentState = 0;
386 pbtpriv->PreviousState = 0;
 387	printk(KERN_INFO "rtl8192cu: BT Coexistence = %s\n",
388 (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
389 if (pbtpriv->BT_Coexist) {
390 if (pbtpriv->BT_Ant_Num == Ant_x2)
391 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
392 "Ant_Num = Antx2\n");
393 else if (pbtpriv->BT_Ant_Num == Ant_x1)
394 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
395 "Ant_Num = Antx1\n");
396 switch (pbtpriv->BT_CoexistType) {
397 case BT_2Wire:
398 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
399 "CoexistType = BT_2Wire\n");
400 break;
401 case BT_ISSC_3Wire:
402 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
403 "CoexistType = BT_ISSC_3Wire\n");
404 break;
405 case BT_Accel:
406 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
407 "CoexistType = BT_Accel\n");
408 break;
409 case BT_CSR_BC4:
410 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
411 "CoexistType = BT_CSR_BC4\n");
412 break;
413 case BT_CSR_BC8:
414 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
415 "CoexistType = BT_CSR_BC8\n");
416 break;
417 case BT_RTL8756:
418 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
419 "CoexistType = BT_RTL8756\n");
420 break;
421 default:
422 printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
423 "CoexistType = Unknown\n");
424 break;
425 }
426 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Ant_isolation = %d\n",
427 pbtpriv->BT_Ant_isolation);
428 switch (pbtpriv->BT_Service) {
429 case BT_OtherAction:
430 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
431 "BT_OtherAction\n");
432 break;
433 case BT_SCO:
434 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
435 "BT_SCO\n");
436 break;
437 case BT_Busy:
438 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
439 "BT_Busy\n");
440 break;
441 case BT_OtherBusy:
442 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
443 "BT_OtherBusy\n");
444 break;
445 default:
446 printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
447 "BT_Idle\n");
448 break;
449 }
450 printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n",
451 pbtpriv->BT_RadioSharedType);
452 }
453}
454
455#define GET_BT_COEXIST(priv) (&priv->bt_coexist)
456
457static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw,
458 u8 *contents,
 459						bool bautoloadfailed)
460{
461 HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter);
462 bool isNormal = IS_NORMAL_CHIP(pHalData->VersionID);
463 struct btcoexist_priv *pbtpriv = &pHalData->bt_coexist;
464 u8 rf_opt4;
465
466 _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv));
467 if (AutoloadFail) {
468 pbtpriv->BT_Coexist = _FALSE;
469 pbtpriv->BT_CoexistType = BT_2Wire;
470 pbtpriv->BT_Ant_Num = Ant_x2;
471 pbtpriv->BT_Ant_isolation = 0;
472 pbtpriv->BT_RadioSharedType = BT_Radio_Shared;
473 return;
474 }
475 if (isNormal) {
476 if (pHalData->BoardType == BOARD_USB_COMBO)
477 pbtpriv->BT_Coexist = _TRUE;
478 else
479 pbtpriv->BT_Coexist = ((PROMContent[EEPROM_RF_OPT3] &
480 0x20) >> 5); /* bit[5] */
481 rf_opt4 = PROMContent[EEPROM_RF_OPT4];
482 pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */
483 pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */
484 pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */
485 pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */
486 } else {
487 pbtpriv->BT_Coexist = (PROMContent[EEPROM_RF_OPT4] >> 4) ?
488 _TRUE : _FALSE;
489 }
490 _update_bt_param(Adapter);
491}
492#endif
493
494static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
495{
496 struct rtl_priv *rtlpriv = rtl_priv(hw);
497 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
498 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
499 u16 i, usvalue;
500 u8 hwinfo[HWSET_MAX_SIZE] = {0};
501 u16 eeprom_id;
502
503 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
504 rtl_efuse_shadow_map_update(hw);
505 memcpy((void *)hwinfo,
506 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
507 HWSET_MAX_SIZE);
508 } else if (rtlefuse->epromtype == EEPROM_93C46) {
509 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 510			 ("RTL819X did not boot from EEPROM, check it!!"));
511 }
512 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
513 hwinfo, HWSET_MAX_SIZE);
514 eeprom_id = *((u16 *)&hwinfo[0]);
515 if (eeprom_id != RTL8190_EEPROM_ID) {
516 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
517 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
518 rtlefuse->autoload_failflag = true;
519 } else {
520 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
521 rtlefuse->autoload_failflag = false;
522 }
523 if (rtlefuse->autoload_failflag == true)
524 return;
525 for (i = 0; i < 6; i += 2) {
526 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
527 *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
528 }
529 printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr);
530 _rtl92cu_read_txpower_info_from_hwpg(hw,
531 rtlefuse->autoload_failflag, hwinfo);
532 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
533 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
534 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
535 (" VID = 0x%02x PID = 0x%02x\n",
536 rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
537 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
538 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
539 rtlefuse->txpwr_fromeprom = true;
540 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
541 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
542 ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
543 if (rtlhal->oem_id == RT_CID_DEFAULT) {
544 switch (rtlefuse->eeprom_oemid) {
545 case EEPROM_CID_DEFAULT:
546 if (rtlefuse->eeprom_did == 0x8176) {
547 if ((rtlefuse->eeprom_svid == 0x103C &&
548 rtlefuse->eeprom_smid == 0x1629))
549 rtlhal->oem_id = RT_CID_819x_HP;
550 else
551 rtlhal->oem_id = RT_CID_DEFAULT;
552 } else {
553 rtlhal->oem_id = RT_CID_DEFAULT;
554 }
555 break;
556 case EEPROM_CID_TOSHIBA:
557 rtlhal->oem_id = RT_CID_TOSHIBA;
558 break;
559 case EEPROM_CID_QMI:
560 rtlhal->oem_id = RT_CID_819x_QMI;
561 break;
562 case EEPROM_CID_WHQL:
563 default:
564 rtlhal->oem_id = RT_CID_DEFAULT;
565 break;
566 }
567 }
568 _rtl92cu_read_board_type(hw, hwinfo);
569#ifdef CONFIG_BT_COEXIST
570 _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo,
571 rtlefuse->autoload_failflag);
572#endif
573}
574
575static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
576{
577 struct rtl_priv *rtlpriv = rtl_priv(hw);
578 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
579 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
580
581 switch (rtlhal->oem_id) {
582 case RT_CID_819x_HP:
583 usb_priv->ledctl.led_opendrain = true;
584 break;
585 case RT_CID_819x_Lenovo:
586 case RT_CID_DEFAULT:
587 case RT_CID_TOSHIBA:
588 case RT_CID_CCX:
589 case RT_CID_819x_Acer:
590 case RT_CID_WHQL:
591 default:
592 break;
593 }
594 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
595 ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
596}
597
598void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
599{
600
601 struct rtl_priv *rtlpriv = rtl_priv(hw);
602 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
603 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
604 u8 tmp_u1b;
605
606 if (!IS_NORMAL_CHIP(rtlhal->version))
607 return;
608 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
609 rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ?
610 EEPROM_93C46 : EEPROM_BOOT_EFUSE;
611 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n",
 612		 (tmp_u1b & EEPROMSEL) ? "EEPROM" : "EFUSE"));
613 rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
614 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n",
615 (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!"));
616 _rtl92cu_read_adapter_info(hw);
617 _rtl92cu_hal_customized_behavior(hw);
618 return;
619}
620
621static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
622{
623 struct rtl_priv *rtlpriv = rtl_priv(hw);
624 int status = 0;
625 u16 value16;
626 u8 value8;
627 /* polling autoload done. */
628 u32 pollingCount = 0;
629
630 do {
631 if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
632 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
633 ("Autoload Done!\n"));
634 break;
635 }
636 if (pollingCount++ > 100) {
637 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
 638				 ("Polling of REG_APS_FSMCO[PFM_ALDN]"
 639				  " failed!\n"));
640 return -ENODEV;
641 }
642 } while (true);
643 /* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */
644 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
645 /* Power on when re-enter from IPS/Radio off/card disable */
646 /* enable SPS into PWM mode */
647 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
648 udelay(100);
649 value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
650 if (0 == (value8 & LDV12_EN)) {
651 value8 |= LDV12_EN;
652 rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
653 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
654 (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n",
655 value8));
656 udelay(100);
657 value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
658 value8 &= ~ISO_MD2PP;
659 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8);
660 }
661 /* auto enable WLAN */
662 pollingCount = 0;
663 value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO);
664 value16 |= APFM_ONMAC;
665 rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
666 do {
667 if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) {
668 printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n");
669 break;
670 }
671 if (pollingCount++ > 100) {
672 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
 673				 ("Polling of REG_APS_FSMCO[APFM_ONMAC]"
 674				  " failed!\n"));
675 return -ENODEV;
676 }
677 } while (true);
678 /* Enable Radio ,GPIO ,and LED function */
679 rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812);
680 /* release RF digital isolation */
681 value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
682 value16 &= ~ISO_DIOR;
683 rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16);
684 /* Reconsider when to do this operation after asking HWSD. */
685 pollingCount = 0;
686 rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv,
687 REG_APSD_CTRL) & ~BIT(6)));
688 do {
689 pollingCount++;
690 } while ((pollingCount < 200) &&
691 (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7)));
692 /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
693 value16 = rtl_read_word(rtlpriv, REG_CR);
694 value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
695 PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC);
696 rtl_write_word(rtlpriv, REG_CR, value16);
697 return status;
698}
699
700static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
701 bool wmm_enable,
702 u8 out_ep_num,
703 u8 queue_sel)
704{
705 struct rtl_priv *rtlpriv = rtl_priv(hw);
706 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
707 bool isChipN = IS_NORMAL_CHIP(rtlhal->version);
708 u32 outEPNum = (u32)out_ep_num;
709 u32 numHQ = 0;
710 u32 numLQ = 0;
711 u32 numNQ = 0;
712 u32 numPubQ;
713 u32 value32;
714 u8 value8;
715 u32 txQPageNum, txQPageUnit, txQRemainPage;
716
717 if (!wmm_enable) {
718 numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ :
719 CHIP_A_PAGE_NUM_PUBQ;
720 txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ;
721
722 txQPageUnit = txQPageNum/outEPNum;
723 txQRemainPage = txQPageNum % outEPNum;
724 if (queue_sel & TX_SELE_HQ)
725 numHQ = txQPageUnit;
726 if (queue_sel & TX_SELE_LQ)
727 numLQ = txQPageUnit;
 728		/* The HIGH priority queue is always present in the 2 out-ep
 729		 * configuration; remaining pages are assigned to the High queue */
730 if ((outEPNum > 1) && (txQRemainPage))
731 numHQ += txQRemainPage;
 732		/* NOTE: This step is done before writing REG_RQPN. */
733 if (isChipN) {
734 if (queue_sel & TX_SELE_NQ)
735 numNQ = txQPageUnit;
736 value8 = (u8)_NPQ(numNQ);
737 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
738 }
739 } else {
 740		/* for WMM, the number of out-eps must be greater than or equal to 2 */
741 numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ :
742 WMM_CHIP_A_PAGE_NUM_PUBQ;
743 if (queue_sel & TX_SELE_HQ) {
744 numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ :
745 WMM_CHIP_A_PAGE_NUM_HPQ;
746 }
747 if (queue_sel & TX_SELE_LQ) {
748 numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ :
749 WMM_CHIP_A_PAGE_NUM_LPQ;
750 }
 751		/* NOTE: This step is done before writing REG_RQPN. */
752 if (isChipN) {
753 if (queue_sel & TX_SELE_NQ)
754 numNQ = WMM_CHIP_B_PAGE_NUM_NPQ;
755 value8 = (u8)_NPQ(numNQ);
756 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
757 }
758 }
759 /* TX DMA */
760 value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
761 rtl_write_dword(rtlpriv, REG_RQPN, value32);
762}
763
764static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
765{
766 struct rtl_priv *rtlpriv = rtl_priv(hw);
767 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
768 u8 txpktbuf_bndy;
769 u8 value8;
770
771 if (!wmm_enable)
772 txpktbuf_bndy = TX_PAGE_BOUNDARY;
773 else /* for WMM */
774 txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
775 ? WMM_CHIP_B_TX_PAGE_BOUNDARY
776 : WMM_CHIP_A_TX_PAGE_BOUNDARY;
777 rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
778 rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
779 rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
780 rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
781 rtl_write_byte(rtlpriv, REG_TDECTRL+1, txpktbuf_bndy);
782 rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
783 value8 = _PSRX(RX_PAGE_SIZE_REG_VALUE) | _PSTX(PBP_128);
784 rtl_write_byte(rtlpriv, REG_PBP, value8);
785}
786
787static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ,
788 u16 bkQ, u16 viQ, u16 voQ,
789 u16 mgtQ, u16 hiQ)
790{
791 struct rtl_priv *rtlpriv = rtl_priv(hw);
792 u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7);
793
794 value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
795 _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
796 _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
797 rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16);
798}
799
800static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
801 bool wmm_enable,
802 u8 queue_sel)
803{
804 u16 uninitialized_var(value);
805
806 switch (queue_sel) {
807 case TX_SELE_HQ:
808 value = QUEUE_HIGH;
809 break;
810 case TX_SELE_LQ:
811 value = QUEUE_LOW;
812 break;
813 case TX_SELE_NQ:
814 value = QUEUE_NORMAL;
815 break;
816 default:
817 WARN_ON(1); /* Shall not reach here! */
818 break;
819 }
820 _rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
821 value, value);
822 printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
823}
824
825static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
826 bool wmm_enable,
827 u8 queue_sel)
828{
829 u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
830 u16 uninitialized_var(valueHi);
831 u16 uninitialized_var(valueLow);
832
833 switch (queue_sel) {
834 case (TX_SELE_HQ | TX_SELE_LQ):
835 valueHi = QUEUE_HIGH;
836 valueLow = QUEUE_LOW;
837 break;
838 case (TX_SELE_NQ | TX_SELE_LQ):
839 valueHi = QUEUE_NORMAL;
840 valueLow = QUEUE_LOW;
841 break;
842 case (TX_SELE_HQ | TX_SELE_NQ):
843 valueHi = QUEUE_HIGH;
844 valueLow = QUEUE_NORMAL;
845 break;
846 default:
847 WARN_ON(1);
848 break;
849 }
850 if (!wmm_enable) {
851 beQ = valueLow;
852 bkQ = valueLow;
853 viQ = valueHi;
854 voQ = valueHi;
855 mgtQ = valueHi;
856 hiQ = valueHi;
857 } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
858 beQ = valueHi;
859 bkQ = valueLow;
860 viQ = valueLow;
861 voQ = valueHi;
862 mgtQ = valueHi;
863 hiQ = valueHi;
864 }
865 _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
866 printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
867}
868
869static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
870 bool wmm_enable,
871 u8 queue_sel)
872{
873 u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
874 struct rtl_priv *rtlpriv = rtl_priv(hw);
875
876 if (!wmm_enable) { /* typical setting */
877 beQ = QUEUE_LOW;
878 bkQ = QUEUE_LOW;
879 viQ = QUEUE_NORMAL;
880 voQ = QUEUE_HIGH;
881 mgtQ = QUEUE_HIGH;
882 hiQ = QUEUE_HIGH;
883 } else { /* for WMM */
884 beQ = QUEUE_LOW;
885 bkQ = QUEUE_NORMAL;
886 viQ = QUEUE_NORMAL;
887 voQ = QUEUE_HIGH;
888 mgtQ = QUEUE_HIGH;
889 hiQ = QUEUE_HIGH;
890 }
891 _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
892 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
893 ("Tx queue select :0x%02x..\n", queue_sel));
894}
895
896static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
897 bool wmm_enable,
898 u8 out_ep_num,
899 u8 queue_sel)
900{
901 switch (out_ep_num) {
902 case 1:
903 _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable,
904 queue_sel);
905 break;
906 case 2:
907 _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable,
908 queue_sel);
909 break;
910 case 3:
911 _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable,
912 queue_sel);
913 break;
914 default:
915 WARN_ON(1); /* Shall not reach here! */
916 break;
917 }
918}
919
920static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
921 bool wmm_enable,
922 u8 out_ep_num,
923 u8 queue_sel)
924{
925 u8 hq_sele;
926 struct rtl_priv *rtlpriv = rtl_priv(hw);
927
928 switch (out_ep_num) {
929 case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
930 if (!wmm_enable) /* typical setting */
931 hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
932 HQSEL_HIQ;
933 else /* for WMM */
934 hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
935 HQSEL_HIQ;
936 break;
937 case 1:
938 if (TX_SELE_LQ == queue_sel) {
939 /* map all endpoint to Low queue */
940 hq_sele = 0;
941 } else if (TX_SELE_HQ == queue_sel) {
942 /* map all endpoint to High queue */
943 hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ |
944 HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ;
945 }
946 break;
947 default:
948 WARN_ON(1); /* Shall not reach here! */
949 break;
950 }
951 rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
952 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
953 ("Tx queue select :0x%02x..\n", hq_sele));
954}
955
956static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
957 bool wmm_enable,
958 u8 out_ep_num,
959 u8 queue_sel)
960{
961 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
962 if (IS_NORMAL_CHIP(rtlhal->version))
963 _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num,
964 queue_sel);
965 else
966 _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num,
967 queue_sel);
968}
969
970static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw)
971{
972}
973
974static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
975{
976 u16 value16;
977
978 struct rtl_priv *rtlpriv = rtl_priv(hw);
979 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
980
981 mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS |
982 RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
983 RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
984 rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
985 /* Accept all multicast address */
986 rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF);
987 rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF);
988 /* Accept all management frames */
989 value16 = 0xFFFF;
990 rtl92c_set_mgt_filter(hw, value16);
991 /* Reject all control frame - default value is 0 */
992 rtl92c_set_ctrl_filter(hw, 0x0);
993 /* Accept all data frames */
994 value16 = 0xFFFF;
995 rtl92c_set_data_filter(hw, value16);
996}
997
998static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
999{
1000 struct rtl_priv *rtlpriv = rtl_priv(hw);
1001 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1002 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
1003 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
1004 int err = 0;
1005 u32 boundary = 0;
1006 u8 wmm_enable = false; /* TODO */
1007 u8 out_ep_nums = rtlusb->out_ep_nums;
1008 u8 queue_sel = rtlusb->out_queue_sel;
1009 err = _rtl92cu_init_power_on(hw);
1010
1011 if (err) {
1012 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1013 ("Failed to init power on!\n"));
1014 return err;
1015 }
1016 if (!wmm_enable) {
1017 boundary = TX_PAGE_BOUNDARY;
1018 } else { /* for WMM */
1019 boundary = (IS_NORMAL_CHIP(rtlhal->version))
1020 ? WMM_CHIP_B_TX_PAGE_BOUNDARY
1021 : WMM_CHIP_A_TX_PAGE_BOUNDARY;
1022 }
1023 if (false == rtl92c_init_llt_table(hw, boundary)) {
1024 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1025 ("Failed to init LLT Table!\n"));
1026 return -EINVAL;
1027 }
1028 _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
1029 queue_sel);
1030 _rtl92c_init_trx_buffer(hw, wmm_enable);
1031 _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
1032 queue_sel);
1033 /* Get Rx PHY status in order to report RSSI and others. */
1034 rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
1035 rtl92c_init_interrupt(hw);
1036 rtl92c_init_network_type(hw);
1037 _rtl92cu_init_wmac_setting(hw);
1038 rtl92c_init_adaptive_ctrl(hw);
1039 rtl92c_init_edca(hw);
1040 rtl92c_init_rate_fallback(hw);
1041 rtl92c_init_retry_function(hw);
1042 _rtl92cu_init_usb_aggregation(hw);
1043 rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20);
1044 rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
1045 rtl92c_init_beacon_parameters(hw, rtlhal->version);
1046 rtl92c_init_ampdu_aggregation(hw);
1047 rtl92c_init_beacon_max_error(hw, true);
1048 return err;
1049}
1050
1051void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
1052{
1053 struct rtl_priv *rtlpriv = rtl_priv(hw);
1054 u8 sec_reg_value = 0x0;
1055 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1056
1057 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1058 ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
1059 rtlpriv->sec.pairwise_enc_algorithm,
1060 rtlpriv->sec.group_enc_algorithm));
1061 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
1062 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
1063 ("not open sw encryption\n"));
1064 return;
1065 }
1066 sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
1067 if (rtlpriv->sec.use_defaultkey) {
1068 sec_reg_value |= SCR_TxUseDK;
1069 sec_reg_value |= SCR_RxUseDK;
1070 }
1071 if (IS_NORMAL_CHIP(rtlhal->version))
1072 sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
1073 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
1074 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
1075 ("The SECR-value %x\n", sec_reg_value));
1076 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
1077}
1078
1079static void _rtl92cu_hw_configure(struct ieee80211_hw *hw)
1080{
1081 struct rtl_priv *rtlpriv = rtl_priv(hw);
1082 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1083
 1084	/* To fix MAC loopback mode failure. */
1085 rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
1086 rtl_write_byte(rtlpriv, 0x15, 0xe9);
1087 /* HW SEQ CTRL */
1088 /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
1089 rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
1090 /* fixed USB interface interference issue */
1091 rtl_write_byte(rtlpriv, 0xfe40, 0xe0);
1092 rtl_write_byte(rtlpriv, 0xfe41, 0x8d);
1093 rtl_write_byte(rtlpriv, 0xfe42, 0x80);
1094 rtlusb->reg_bcn_ctrl_val = 0x18;
1095 rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
1096}
1097
1098static void _InitPABias(struct ieee80211_hw *hw)
1099{
1100 struct rtl_priv *rtlpriv = rtl_priv(hw);
1101 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1102 u8 pa_setting;
1103
1104 /* FIXED PA current issue */
1105 pa_setting = efuse_read_1byte(hw, 0x1FA);
1106 if (!(pa_setting & BIT(0))) {
1107 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x0F406);
1108 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x4F406);
1109 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x8F406);
1110 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0xCF406);
1111 }
1112 if (!(pa_setting & BIT(1)) && IS_NORMAL_CHIP(rtlhal->version) &&
1113 IS_92C_SERIAL(rtlhal->version)) {
1114 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x0F406);
1115 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x4F406);
1116 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x8F406);
1117 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0xCF406);
1118 }
1119 if (!(pa_setting & BIT(4))) {
1120 pa_setting = rtl_read_byte(rtlpriv, 0x16);
1121 pa_setting &= 0x0F;
1122 rtl_write_byte(rtlpriv, 0x16, pa_setting | 0x90);
1123 }
1124}
1125
1126static void _InitAntenna_Selection(struct ieee80211_hw *hw)
1127{
1128#ifdef CONFIG_ANTENNA_DIVERSITY
1129 struct rtl_priv *rtlpriv = rtl_priv(hw);
1130 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1131 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1132
1133 if (pHalData->AntDivCfg == 0)
1134 return;
1135
1136 if (rtlphy->rf_type == RF_1T1R) {
1137 rtl_write_dword(rtlpriv, REG_LEDCFG0,
1138 rtl_read_dword(rtlpriv,
1139 REG_LEDCFG0)|BIT(23));
1140 rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
1141 if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) ==
1142 Antenna_A)
1143 pHalData->CurAntenna = Antenna_A;
1144 else
1145 pHalData->CurAntenna = Antenna_B;
1146 }
1147#endif
1148}
1149
1150static void _dump_registers(struct ieee80211_hw *hw)
1151{
1152}
1153
1154static void _update_mac_setting(struct ieee80211_hw *hw)
1155{
1156 struct rtl_priv *rtlpriv = rtl_priv(hw);
1157 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1158
1159 mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
1160 mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
1161 mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
1162 mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
1163}
1164
1165int rtl92cu_hw_init(struct ieee80211_hw *hw)
1166{
1167 struct rtl_priv *rtlpriv = rtl_priv(hw);
1168 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1169 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1170 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1171 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1172 int err = 0;
1173 static bool iqk_initialized;
1174
1175 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
1176 err = _rtl92cu_init_mac(hw);
1177 if (err) {
1178 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
1179 return err;
1180 }
1181 err = rtl92c_download_fw(hw);
1182 if (err) {
1183 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1184 ("Failed to download FW. Init HW without FW now..\n"));
1185 err = 1;
1186 rtlhal->fw_ready = false;
1187 return err;
1188 } else {
1189 rtlhal->fw_ready = true;
1190 }
1191 rtlhal->last_hmeboxnum = 0; /* h2c */
1192 _rtl92cu_phy_param_tab_init(hw);
1193 rtl92c_phy_mac_config(hw);
1194 rtl92c_phy_bb_config(hw);
1195 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
1196 rtl92c_phy_rf_config(hw);
1197 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
1198 !IS_92C_SERIAL(rtlhal->version)) {
1199 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
1200 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
1201 }
1202 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
1203 RF_CHNLBW, RFREG_OFFSET_MASK);
1204 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
1205 RF_CHNLBW, RFREG_OFFSET_MASK);
1206 rtl92c_bb_block_on(hw);
1207 rtl_cam_reset_all_entry(hw);
1208 rtl92cu_enable_hw_security_config(hw);
1209 ppsc->rfpwr_state = ERFON;
1210 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
1211 if (ppsc->rfpwr_state == ERFON) {
1212 rtl92c_phy_set_rfpath_switch(hw, 1);
1213 if (iqk_initialized) {
1214 rtl92c_phy_iq_calibrate(hw, false);
1215 } else {
1216 rtl92c_phy_iq_calibrate(hw, false);
1217 iqk_initialized = true;
1218 }
1219 rtl92c_dm_check_txpower_tracking(hw);
1220 rtl92c_phy_lc_calibrate(hw);
1221 }
1222 _rtl92cu_hw_configure(hw);
1223 _InitPABias(hw);
1224 _InitAntenna_Selection(hw);
1225 _update_mac_setting(hw);
1226 rtl92c_dm_init(hw);
1227 _dump_registers(hw);
1228 return err;
1229}
1230
1231static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw)
1232{
1233 struct rtl_priv *rtlpriv = rtl_priv(hw);
1234/**************************************
1235a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue
1236b. RF path 0 offset 0x00 = 0x00 disable RF
1237c. APSD_CTRL 0x600[7:0] = 0x40
1238d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine
1239e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine
1240***************************************/
1241 u8 eRFPath = 0, value8 = 0;
1242 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
1243 rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0);
1244
1245 value8 |= APSDOFF;
1246 rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/
1247 value8 = 0;
1248 value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn);
1249 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/
1250 value8 &= (~FEN_BB_GLB_RSTn);
1251 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/
1252}
1253
1254static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
1255{
1256 struct rtl_priv *rtlpriv = rtl_priv(hw);
1257 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1258
1259 if (rtlhal->fw_version <= 0x20) {
1260 /*****************************
1261 f. MCUFWDL 0x80[7:0]=0 reset MCU ready status
1262 g. SYS_FUNC_EN 0x02[10]= 0 reset MCU reg, (8051 reset)
1263 h. SYS_FUNC_EN 0x02[15-12]= 5 reset MAC reg, DCORE
1264 i. SYS_FUNC_EN 0x02[10]= 1 enable MCU reg, (8051 enable)
1265 ******************************/
1266 u16 valu16 = 0;
1267
1268 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
1269 valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
1270 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 &
1271 (~FEN_CPUEN))); /* reset MCU ,8051 */
1272 valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN)&0x0FFF;
1273 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
1274 (FEN_HWPDN|FEN_ELDR))); /* reset MAC */
1275 valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
1276 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 |
1277 FEN_CPUEN)); /* enable MCU ,8051 */
1278 } else {
1279 u8 retry_cnts = 0;
1280
1281 /* IF fw in RAM code, do reset */
1282 if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) {
1283 /* reset MCU ready status */
1284 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
1285 if (rtlhal->fw_ready) {
1286 /* 8051 reset by self */
1287 rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20);
1288 while ((retry_cnts++ < 100) &&
1289 (FEN_CPUEN & rtl_read_word(rtlpriv,
1290 REG_SYS_FUNC_EN))) {
1291 udelay(50);
1292 }
1293 if (retry_cnts >= 100) {
1294 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1295 ("#####=> 8051 reset failed!.."
 1296					 ".......................\n"));
1297 /* if 8051 reset fail, reset MAC. */
1298 rtl_write_byte(rtlpriv,
1299 REG_SYS_FUNC_EN + 1,
1300 0x50);
1301 udelay(100);
1302 }
1303 }
1304 }
1305 /* Reset MAC and Enable 8051 */
1306 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
1307 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
1308 }
1309 if (bWithoutHWSM) {
1310 /*****************************
1311 Without HW auto state machine
1312 g.SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock
1313 h.AFE_PLL_CTRL 0x28[7:0] = 0x80 disable AFE PLL
1314 i.AFE_XTAL_CTRL 0x24[15:0] = 0x880F gated AFE DIG_CLOCK
 1315	j.SYS_ISO_CTRL 0x00[7:0] = 0xF9 isolate digital to PON
1316 ******************************/
1317 rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
1318 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
1319 rtl_write_word(rtlpriv, REG_AFE_XTAL_CTRL, 0x880F);
1320 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xF9);
1321 }
1322}
1323
1324static void _ResetDigitalProcedure2(struct ieee80211_hw *hw)
1325{
1326 struct rtl_priv *rtlpriv = rtl_priv(hw);
1327/*****************************
 1328k. SYS_FUNC_EN 0x03[7:0] = 0x44 disable ELDR function
1329l. SYS_CLKR 0x08[15:0] = 0x3083 disable ELDR clock
1330m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON
1331******************************/
1332 rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3);
1333 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82);
1334}
1335
1336static void _DisableGPIO(struct ieee80211_hw *hw)
1337{
1338 struct rtl_priv *rtlpriv = rtl_priv(hw);
1339/***************************************
1340j. GPIO_PIN_CTRL 0x44[31:0]=0x000
1341k. Value = GPIO_PIN_CTRL[7:0]
1342l. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write ext PIN level
1343m. GPIO_MUXCFG 0x42 [15:0] = 0x0780
1344n. LEDCFG 0x4C[15:0] = 0x8080
1345***************************************/
1346 u8 value8;
1347 u16 value16;
1348 u32 value32;
1349
1350 /* 1. Disable GPIO[7:0] */
1351 rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000);
1352 value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF;
1353 value8 = (u8) (value32&0x000000FF);
1354 value32 |= ((value8<<8) | 0x00FF0000);
1355 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32);
1356 /* 2. Disable GPIO[10:8] */
1357 rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00);
1358 value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F;
1359 value8 = (u8) (value16&0x000F);
1360 value16 |= ((value8<<4) | 0x0780);
1361 rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16);
1362 /* 3. Disable LED0 & 1 */
1363 rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
1364}
1365
1366static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM)
1367{
1368 struct rtl_priv *rtlpriv = rtl_priv(hw);
1369 u16 value16 = 0;
1370 u8 value8 = 0;
1371
1372 if (bWithoutHWSM) {
1373 /*****************************
1374 n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power
1375 o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power
1376 r. When driver call disable, the ASIC will turn off remaining
1377 clock automatically
1378 ******************************/
1379 rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
1380 value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
1381 value8 &= (~LDV12_EN);
1382 rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
1383 }
1384
1385/*****************************
1386h. SPS0_CTRL 0x11[7:0] = 0x23 enter PFM mode
1387i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend
1388******************************/
1389 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
1390 value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN);
1391 rtl_write_word(rtlpriv, REG_APS_FSMCO, (u16)value16);
1392 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E);
1393}
1394
1395static void _CardDisableHWSM(struct ieee80211_hw *hw)
1396{
1397 /* ==== RF Off Sequence ==== */
1398 _DisableRFAFEAndResetBB(hw);
1399 /* ==== Reset digital sequence ====== */
1400 _ResetDigitalProcedure1(hw, false);
1401 /* ==== Pull GPIO PIN to balance level and LED control ====== */
1402 _DisableGPIO(hw);
1403 /* ==== Disable analog sequence === */
1404 _DisableAnalog(hw, false);
1405}
1406
1407static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw)
1408{
1409 /*==== RF Off Sequence ==== */
1410 _DisableRFAFEAndResetBB(hw);
1411 /* ==== Reset digital sequence ====== */
1412 _ResetDigitalProcedure1(hw, true);
1413 /* ==== Pull GPIO PIN to balance level and LED control ====== */
1414 _DisableGPIO(hw);
1415 /* ==== Reset digital sequence ====== */
1416 _ResetDigitalProcedure2(hw);
1417 /* ==== Disable analog sequence === */
1418 _DisableAnalog(hw, true);
1419}
1420
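/* Apply set/clear bits to the cached copy of REG_BCN_CTRL kept in the USB
 * private data and write the result back, so that successive callers do
 * not clobber each other's bits. */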
1421static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
1422 u8 set_bits, u8 clear_bits)
1423{
1424 struct rtl_priv *rtlpriv = rtl_priv(hw);
1425 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1426
1427 rtlusb->reg_bcn_ctrl_val |= set_bits;
1428 rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
1429 rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val);
1430}
1431
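/* Stop beacon transmission: on normal chips, clear BIT(6) of
 * REG_FWHW_TXQ_CTRL + 2 and tighten the TBTT prohibit window; on test
 * chips, set BIT(6) of REG_TXPAUSE instead. */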
1432static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw)
1433{
1434 struct rtl_priv *rtlpriv = rtl_priv(hw);
1435 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1436 u8 tmp1byte = 0;
1437 if (IS_NORMAL_CHIP(rtlhal->version)) {
1438 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
1439 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
1440 tmp1byte & (~BIT(6)));
1441 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
1442 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
1443 tmp1byte &= ~(BIT(0));
1444 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
1445 } else {
1446 rtl_write_byte(rtlpriv, REG_TXPAUSE,
1447 rtl_read_byte(rtlpriv, REG_TXPAUSE) | BIT(6));
1448 }
1449}
1450
1451static void _rtl92cu_resume_tx_beacon(struct ieee80211_hw *hw)
1452{
1453 struct rtl_priv *rtlpriv = rtl_priv(hw);
1454 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1455 u8 tmp1byte = 0;
1456
1457 if (IS_NORMAL_CHIP(rtlhal->version)) {
1458 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
1459 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
1460 tmp1byte | BIT(6));
1461 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
1462 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
1463 tmp1byte |= BIT(0);
1464 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
1465 } else {
1466 rtl_write_byte(rtlpriv, REG_TXPAUSE,
1467 rtl_read_byte(rtlpriv, REG_TXPAUSE) & (~BIT(6)));
1468 }
1469}
1470
1471static void _rtl92cu_enable_bcn_sub_func(struct ieee80211_hw *hw)
1472{
1473 struct rtl_priv *rtlpriv = rtl_priv(hw);
1474 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1475
1476 if (IS_NORMAL_CHIP(rtlhal->version))
1477 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(1));
1478 else
1479 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1480}
1481
1482static void _rtl92cu_disable_bcn_sub_func(struct ieee80211_hw *hw)
1483{
1484 struct rtl_priv *rtlpriv = rtl_priv(hw);
1485 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1486
1487 if (IS_NORMAL_CHIP(rtlhal->version))
1488 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(1), 0);
1489 else
1490 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1491}
1492
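/* Program the network-type field of MSR and the beacon-related functions
 * for the requested interface type; returns non-zero for types that are
 * not supported here. */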
1493static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
1494 enum nl80211_iftype type)
1495{
1496 struct rtl_priv *rtlpriv = rtl_priv(hw);
1497 u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
1498 enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
1499
1500 bt_msr &= 0xfc;
1501 rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
1502 if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
1503 NL80211_IFTYPE_STATION) {
1504 _rtl92cu_stop_tx_beacon(hw);
1505 _rtl92cu_enable_bcn_sub_func(hw);
1506 } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
1507 _rtl92cu_resume_tx_beacon(hw);
1508 _rtl92cu_disable_bcn_sub_func(hw);
1509 } else {
1510 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_"
1511 "STATUS:No such media status(%x).\n", type));
1512 }
1513 switch (type) {
1514 case NL80211_IFTYPE_UNSPECIFIED:
1515 bt_msr |= MSR_NOLINK;
1516 ledaction = LED_CTL_LINK;
1517 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1518 ("Set Network type to NO LINK!\n"));
1519 break;
1520 case NL80211_IFTYPE_ADHOC:
1521 bt_msr |= MSR_ADHOC;
1522 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1523 ("Set Network type to Ad Hoc!\n"));
1524 break;
1525 case NL80211_IFTYPE_STATION:
1526 bt_msr |= MSR_INFRA;
1527 ledaction = LED_CTL_LINK;
1528 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1529 ("Set Network type to STA!\n"));
1530 break;
1531 case NL80211_IFTYPE_AP:
1532 bt_msr |= MSR_AP;
1533 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1534 ("Set Network type to AP!\n"));
1535 break;
1536 default:
1537 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1538			 ("Network type %d not supported!\n", type));
1539 goto error_out;
1540 }
1541 rtl_write_byte(rtlpriv, (MSR), bt_msr);
1542 rtlpriv->cfg->ops->led_control(hw, ledaction);
1543 if ((bt_msr & 0xfc) == MSR_AP)
1544 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
1545 else
1546 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
1547 return 0;
1548error_out:
1549 return 1;
1550}
1551
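/* Power the adapter down: mark the MAC as unlinked, switch the media status
 * to "no link", turn the LED off and run the card-disable sequence selected
 * by rtlusb->disableHWSM. */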
1552void rtl92cu_card_disable(struct ieee80211_hw *hw)
1553{
1554 struct rtl_priv *rtlpriv = rtl_priv(hw);
1555 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1556 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1557 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1558 enum nl80211_iftype opmode;
1559
1560 mac->link_state = MAC80211_NOLINK;
1561 opmode = NL80211_IFTYPE_UNSPECIFIED;
1562 _rtl92cu_set_media_status(hw, opmode);
1563 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1564 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1565 if (rtlusb->disableHWSM)
1566 _CardDisableHWSM(hw);
1567 else
1568 _CardDisableWithoutHWSM(hw);
1569}
1570
1571void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1572{
1573 /* dummy routine needed for callback from rtl_op_configure_filter() */
1574}
1575
1576/*========================================================================== */
1577
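/* Enable or disable BSSID filtering through the RCR_CBSSID* bits (and the
 * matching TSF-update control in REG_BCN_CTRL), depending on the interface
 * type, the chip version and the current scan I/O state. */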
1578static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
1579 enum nl80211_iftype type)
1580{
1581 struct rtl_priv *rtlpriv = rtl_priv(hw);
1582 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1583 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1584 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1585 u8 filterout_non_associated_bssid = false;
1586
1587 switch (type) {
1588 case NL80211_IFTYPE_ADHOC:
1589 case NL80211_IFTYPE_STATION:
1590 filterout_non_associated_bssid = true;
1591 break;
1592 case NL80211_IFTYPE_UNSPECIFIED:
1593 case NL80211_IFTYPE_AP:
1594 default:
1595 break;
1596 }
1597 if (filterout_non_associated_bssid == true) {
1598 if (IS_NORMAL_CHIP(rtlhal->version)) {
1599 switch (rtlphy->current_io_type) {
1600 case IO_CMD_RESUME_DM_BY_SCAN:
1601 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1602 rtlpriv->cfg->ops->set_hw_reg(hw,
1603 HW_VAR_RCR, (u8 *)(&reg_rcr));
1604 /* enable update TSF */
1605 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1606 break;
1607 case IO_CMD_PAUSE_DM_BY_SCAN:
1608 reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1609 rtlpriv->cfg->ops->set_hw_reg(hw,
1610 HW_VAR_RCR, (u8 *)(&reg_rcr));
1611 /* disable update TSF */
1612 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1613 break;
1614 }
1615 } else {
1616 reg_rcr |= (RCR_CBSSID);
1617 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1618 (u8 *)(&reg_rcr));
1619 _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
1620 }
1621 } else if (filterout_non_associated_bssid == false) {
1622 if (IS_NORMAL_CHIP(rtlhal->version)) {
1623 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1624 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1625 (u8 *)(&reg_rcr));
1626 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1627 } else {
1628 reg_rcr &= (~RCR_CBSSID);
1629 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1630 (u8 *)(&reg_rcr));
1631 _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
1632 }
1633 }
1634}
1635
1636int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1637{
1638 if (_rtl92cu_set_media_status(hw, type))
1639 return -EOPNOTSUPP;
1640 _rtl92cu_set_check_bssid(hw, type);
1641 return 0;
1642}
1643
1644static void _InitBeaconParameters(struct ieee80211_hw *hw)
1645{
1646 struct rtl_priv *rtlpriv = rtl_priv(hw);
1647 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1648
1649 rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
1650
1651 /* TODO: Remove these magic number */
1652 rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);
1653 rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);
1654 rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
1655 /* Change beacon AIFS to the largest number
1656	 * because the test chip does not perform contention before sending beacons. */
1657 if (IS_NORMAL_CHIP(rtlhal->version))
1658 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
1659 else
1660 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
1661}
1662
1663static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable,
1664 bool Linked)
1665{
1666 struct rtl_priv *rtlpriv = rtl_priv(hw);
1667
1668 _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4) | BIT(3) | BIT(1)), 0x00);
1669 rtl_write_byte(rtlpriv, REG_RD_CTRL+1, 0x6F);
1670}
1671
1672void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
1673{
1674
1675 struct rtl_priv *rtlpriv = rtl_priv(hw);
1676 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1677 u16 bcn_interval, atim_window;
1678 u32 value32;
1679
1680 bcn_interval = mac->beacon_interval;
1681 atim_window = 2; /*FIX MERGE */
1682 rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
1683 rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
1684 _InitBeaconParameters(hw);
1685 rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
1686 /*
1687 * Force beacon frame transmission even after receiving beacon frame
1688 * from other ad hoc STA
1689 *
1690 *
1691 * Reset TSF Timer to zero, added by Roger. 2008.06.24
1692 */
1693 value32 = rtl_read_dword(rtlpriv, REG_TCR);
1694 value32 &= ~TSFRST;
1695 rtl_write_dword(rtlpriv, REG_TCR, value32);
1696 value32 |= TSFRST;
1697 rtl_write_dword(rtlpriv, REG_TCR, value32);
1698 RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
1699 ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
1700 value32));
1701 /* TODO: Modify later (Find the right parameters)
1702	 * NOTE: Fix test chip's bug (about the contention window's randomness) */
1703 if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
1704 (mac->opmode == NL80211_IFTYPE_AP)) {
1705 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50);
1706 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50);
1707 }
1708 _beacon_function_enable(hw, true, true);
1709}
1710
1711void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
1712{
1713 struct rtl_priv *rtlpriv = rtl_priv(hw);
1714 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1715 u16 bcn_interval = mac->beacon_interval;
1716
1717 RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
1718 ("beacon_interval:%d\n", bcn_interval));
1719 rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
1720}
1721
1722void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
1723 u32 add_msr, u32 rm_msr)
1724{
1725}
1726
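/* Read back the piece of driver or hardware state selected by 'variable'
 * into 'val'; unrecognized variables only log an error. */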
1727void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1728{
1729 struct rtl_priv *rtlpriv = rtl_priv(hw);
1730 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1731 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1732
1733 switch (variable) {
1734 case HW_VAR_RCR:
1735 *((u32 *)(val)) = mac->rx_conf;
1736 break;
1737 case HW_VAR_RF_STATE:
1738 *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
1739 break;
1740 case HW_VAR_FWLPS_RF_ON:{
1741 enum rf_pwrstate rfState;
1742 u32 val_rcr;
1743
1744 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
1745 (u8 *)(&rfState));
1746 if (rfState == ERFOFF) {
1747 *((bool *) (val)) = true;
1748 } else {
1749 val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1750 val_rcr &= 0x00070000;
1751 if (val_rcr)
1752 *((bool *) (val)) = false;
1753 else
1754 *((bool *) (val)) = true;
1755 }
1756 break;
1757 }
1758 case HW_VAR_FW_PSMODE_STATUS:
1759 *((bool *) (val)) = ppsc->fw_current_inpsmode;
1760 break;
1761 case HW_VAR_CORRECT_TSF:{
1762 u64 tsf;
1763 u32 *ptsf_low = (u32 *)&tsf;
1764 u32 *ptsf_high = ((u32 *)&tsf) + 1;
1765
1766 *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
1767 *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
1768 *((u64 *)(val)) = tsf;
1769 break;
1770 }
1771 case HW_VAR_MGT_FILTER:
1772 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
1773 break;
1774 case HW_VAR_CTRL_FILTER:
1775 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
1776 break;
1777 case HW_VAR_DATA_FILTER:
1778 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
1779 break;
1780 default:
1781 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1782 ("switch case not process\n"));
1783 break;
1784 }
1785}
1786
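/* Main HW_VAR_* dispatcher for the USB variant: write the hardware or
 * firmware setting selected by 'variable' from the buffer in 'val'. */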
1787void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1788{
1789 struct rtl_priv *rtlpriv = rtl_priv(hw);
1790 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1791 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1792 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1793 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1794 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1795 enum wireless_mode wirelessmode = mac->mode;
1796 u8 idx = 0;
1797
1798 switch (variable) {
1799 case HW_VAR_ETHER_ADDR:{
1800 for (idx = 0; idx < ETH_ALEN; idx++) {
1801 rtl_write_byte(rtlpriv, (REG_MACID + idx),
1802 val[idx]);
1803 }
1804 break;
1805 }
1806 case HW_VAR_BASIC_RATE:{
1807 u16 rate_cfg = ((u16 *) val)[0];
1808 u8 rate_index = 0;
1809
1810 rate_cfg &= 0x15f;
1811 /* TODO */
1812 /* if (mac->current_network.vender == HT_IOT_PEER_CISCO
1813 * && ((rate_cfg & 0x150) == 0)) {
1814 * rate_cfg |= 0x010;
1815 * } */
1816 rate_cfg |= 0x01;
1817 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
1818 rtl_write_byte(rtlpriv, REG_RRSR + 1,
1819 (rate_cfg >> 8) & 0xff);
1820 while (rate_cfg > 0x1) {
1821 rate_cfg >>= 1;
1822 rate_index++;
1823 }
1824 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
1825 rate_index);
1826 break;
1827 }
1828 case HW_VAR_BSSID:{
1829 for (idx = 0; idx < ETH_ALEN; idx++) {
1830 rtl_write_byte(rtlpriv, (REG_BSSID + idx),
1831 val[idx]);
1832 }
1833 break;
1834 }
1835 case HW_VAR_SIFS:{
1836 rtl_write_byte(rtlpriv, REG_SIFS_CCK + 1, val[0]);
1837 rtl_write_byte(rtlpriv, REG_SIFS_OFDM + 1, val[1]);
1838 rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
1839 rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
1840 rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
1841 rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
1842 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1843 ("HW_VAR_SIFS\n"));
1844 break;
1845 }
1846 case HW_VAR_SLOT_TIME:{
1847 u8 e_aci;
1848 u8 QOS_MODE = 1;
1849
1850 rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
1851 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1852 ("HW_VAR_SLOT_TIME %x\n", val[0]));
1853 if (QOS_MODE) {
1854 for (e_aci = 0; e_aci < AC_MAX; e_aci++)
1855 rtlpriv->cfg->ops->set_hw_reg(hw,
1856 HW_VAR_AC_PARAM,
1857 (u8 *)(&e_aci));
1858 } else {
1859 u8 sifstime = 0;
1860 u8 u1bAIFS;
1861
1862 if (IS_WIRELESS_MODE_A(wirelessmode) ||
1863 IS_WIRELESS_MODE_N_24G(wirelessmode) ||
1864 IS_WIRELESS_MODE_N_5G(wirelessmode))
1865 sifstime = 16;
1866 else
1867 sifstime = 10;
1868 u1bAIFS = sifstime + (2 * val[0]);
1869 rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
1870 u1bAIFS);
1871 rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
1872 u1bAIFS);
1873 rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
1874 u1bAIFS);
1875 rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
1876 u1bAIFS);
1877 }
1878 break;
1879 }
1880 case HW_VAR_ACK_PREAMBLE:{
1881 u8 reg_tmp;
1882 u8 short_preamble = (bool) (*(u8 *) val);
1883 reg_tmp = 0;
1884 if (short_preamble)
1885 reg_tmp |= 0x80;
1886 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
1887 break;
1888 }
1889 case HW_VAR_AMPDU_MIN_SPACE:{
1890 u8 min_spacing_to_set;
1891 u8 sec_min_space;
1892
1893 min_spacing_to_set = *((u8 *) val);
1894 if (min_spacing_to_set <= 7) {
1895 switch (rtlpriv->sec.pairwise_enc_algorithm) {
1896 case NO_ENCRYPTION:
1897 case AESCCMP_ENCRYPTION:
1898 sec_min_space = 0;
1899 break;
1900 case WEP40_ENCRYPTION:
1901 case WEP104_ENCRYPTION:
1902 case TKIP_ENCRYPTION:
1903 sec_min_space = 6;
1904 break;
1905 default:
1906 sec_min_space = 7;
1907 break;
1908 }
1909 if (min_spacing_to_set < sec_min_space)
1910 min_spacing_to_set = sec_min_space;
1911 mac->min_space_cfg = ((mac->min_space_cfg &
1912 0xf8) |
1913 min_spacing_to_set);
1914 *val = min_spacing_to_set;
1915 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1916 ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
1917 mac->min_space_cfg));
1918 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
1919 mac->min_space_cfg);
1920 }
1921 break;
1922 }
1923 case HW_VAR_SHORTGI_DENSITY:{
1924 u8 density_to_set;
1925
1926 density_to_set = *((u8 *) val);
1927 density_to_set &= 0x1f;
1928 mac->min_space_cfg &= 0x07;
1929 mac->min_space_cfg |= (density_to_set << 3);
1930 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1931 ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
1932 mac->min_space_cfg));
1933 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
1934 mac->min_space_cfg);
1935 break;
1936 }
1937 case HW_VAR_AMPDU_FACTOR:{
1938 u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
1939 u8 factor_toset;
1940 u8 *p_regtoset = NULL;
1941 u8 index = 0;
1942
1943 p_regtoset = regtoset_normal;
1944 factor_toset = *((u8 *) val);
1945 if (factor_toset <= 3) {
1946 factor_toset = (1 << (factor_toset + 2));
1947 if (factor_toset > 0xf)
1948 factor_toset = 0xf;
1949 for (index = 0; index < 4; index++) {
1950 if ((p_regtoset[index] & 0xf0) >
1951 (factor_toset << 4))
1952 p_regtoset[index] =
1953 (p_regtoset[index] & 0x0f)
1954 | (factor_toset << 4);
1955 if ((p_regtoset[index] & 0x0f) >
1956 factor_toset)
1957 p_regtoset[index] =
1958 (p_regtoset[index] & 0xf0)
1959 | (factor_toset);
1960 rtl_write_byte(rtlpriv,
1961 (REG_AGGLEN_LMT + index),
1962 p_regtoset[index]);
1963 }
1964 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1965 ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
1966 factor_toset));
1967 }
1968 break;
1969 }
1970 case HW_VAR_AC_PARAM:{
1971 u8 e_aci = *((u8 *) val);
1972 u32 u4b_ac_param;
1973 u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
1974 u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
1975 u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op);
1976
1977 u4b_ac_param = (u32) mac->ac[e_aci].aifs;
1978 u4b_ac_param |= (u32) ((cw_min & 0xF) <<
1979 AC_PARAM_ECW_MIN_OFFSET);
1980 u4b_ac_param |= (u32) ((cw_max & 0xF) <<
1981 AC_PARAM_ECW_MAX_OFFSET);
1982 u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
1983 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
1984 ("queue:%x, ac_param:%x\n", e_aci,
1985 u4b_ac_param));
1986 switch (e_aci) {
1987 case AC1_BK:
1988 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
1989 u4b_ac_param);
1990 break;
1991 case AC0_BE:
1992 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
1993 u4b_ac_param);
1994 break;
1995 case AC2_VI:
1996 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
1997 u4b_ac_param);
1998 break;
1999 case AC3_VO:
2000 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
2001 u4b_ac_param);
2002 break;
2003 default:
2004 RT_ASSERT(false, ("SetHwReg8185(): invalid"
2005 " aci: %d !\n", e_aci));
2006 break;
2007 }
2008 if (rtlusb->acm_method != eAcmWay2_SW)
2009 rtlpriv->cfg->ops->set_hw_reg(hw,
2010 HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
2011 break;
2012 }
2013 case HW_VAR_ACM_CTRL:{
2014 u8 e_aci = *((u8 *) val);
2015 union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
2016 (&(mac->ac[0].aifs));
2017 u8 acm = p_aci_aifsn->f.acm;
2018 u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
2019
2020 acm_ctrl =
2021 acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
2022 if (acm) {
2023 switch (e_aci) {
2024 case AC0_BE:
2025 acm_ctrl |= AcmHw_BeqEn;
2026 break;
2027 case AC2_VI:
2028 acm_ctrl |= AcmHw_ViqEn;
2029 break;
2030 case AC3_VO:
2031 acm_ctrl |= AcmHw_VoqEn;
2032 break;
2033 default:
2034 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2035 ("HW_VAR_ACM_CTRL acm set "
2036 "failed: eACI is %d\n", acm));
2037 break;
2038 }
2039 } else {
2040 switch (e_aci) {
2041 case AC0_BE:
2042 acm_ctrl &= (~AcmHw_BeqEn);
2043 break;
2044 case AC2_VI:
2045 acm_ctrl &= (~AcmHw_ViqEn);
2046 break;
2047 case AC3_VO:
2048				acm_ctrl &= (~AcmHw_VoqEn);
2049 break;
2050 default:
2051 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2052 ("switch case not process\n"));
2053 break;
2054 }
2055 }
2056 RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
2057 ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
2058 "Write 0x%X\n", acm_ctrl));
2059 rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
2060 break;
2061 }
2062 case HW_VAR_RCR:{
2063 rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
2064 mac->rx_conf = ((u32 *) (val))[0];
2065 RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
2066 ("### Set RCR(0x%08x) ###\n", mac->rx_conf));
2067 break;
2068 }
2069 case HW_VAR_RETRY_LIMIT:{
2070 u8 retry_limit = ((u8 *) (val))[0];
2071
2072 rtl_write_word(rtlpriv, REG_RL,
2073 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
2074 retry_limit << RETRY_LIMIT_LONG_SHIFT);
2075 RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R"
2076 "ETRY_LIMIT(0x%08x)\n", retry_limit));
2077 break;
2078 }
2079 case HW_VAR_DUAL_TSF_RST:
2080 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
2081 break;
2082 case HW_VAR_EFUSE_BYTES:
2083 rtlefuse->efuse_usedbytes = *((u16 *) val);
2084 break;
2085 case HW_VAR_EFUSE_USAGE:
2086 rtlefuse->efuse_usedpercentage = *((u8 *) val);
2087 break;
2088 case HW_VAR_IO_CMD:
2089 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
2090 break;
2091 case HW_VAR_WPA_CONFIG:
2092 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
2093 break;
2094 case HW_VAR_SET_RPWM:{
2095 u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
2096
2097 if (rpwm_val & BIT(7))
2098 rtl_write_byte(rtlpriv, REG_USB_HRPWM,
2099 (*(u8 *)val));
2100 else
2101 rtl_write_byte(rtlpriv, REG_USB_HRPWM,
2102 ((*(u8 *)val) | BIT(7)));
2103 break;
2104 }
2105 case HW_VAR_H2C_FW_PWRMODE:{
2106 u8 psmode = (*(u8 *) val);
2107
2108 if ((psmode != FW_PS_ACTIVE_MODE) &&
2109 (!IS_92C_SERIAL(rtlhal->version)))
2110 rtl92c_dm_rf_saving(hw, true);
2111 rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
2112 break;
2113 }
2114 case HW_VAR_FW_PSMODE_STATUS:
2115 ppsc->fw_current_inpsmode = *((bool *) val);
2116 break;
2117 case HW_VAR_H2C_FW_JOINBSSRPT:{
2118 u8 mstatus = (*(u8 *) val);
2119 u8 tmp_reg422;
2120 bool recover = false;
2121
2122 if (mstatus == RT_MEDIA_CONNECT) {
2123 rtlpriv->cfg->ops->set_hw_reg(hw,
2124 HW_VAR_AID, NULL);
2125 rtl_write_byte(rtlpriv, REG_CR + 1, 0x03);
2126 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
2127 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
2128 tmp_reg422 = rtl_read_byte(rtlpriv,
2129 REG_FWHW_TXQ_CTRL + 2);
2130 if (tmp_reg422 & BIT(6))
2131 recover = true;
2132 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
2133 tmp_reg422 & (~BIT(6)));
2134 rtl92c_set_fw_rsvdpagepkt(hw, 0);
2135 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
2136 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
2137 if (recover)
2138 rtl_write_byte(rtlpriv,
2139 REG_FWHW_TXQ_CTRL + 2,
2140 tmp_reg422 | BIT(6));
2141 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
2142 }
2143 rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
2144 break;
2145 }
2146 case HW_VAR_AID:{
2147 u16 u2btmp;
2148
2149 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
2150 u2btmp &= 0xC000;
2151 rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
2152 (u2btmp | mac->assoc_id));
2153 break;
2154 }
2155 case HW_VAR_CORRECT_TSF:{
2156 u8 btype_ibss = ((u8 *) (val))[0];
2157
2158 if (btype_ibss == true)
2159 _rtl92cu_stop_tx_beacon(hw);
2160 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
2161 rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf &
2162 0xffffffff));
2163 rtl_write_dword(rtlpriv, REG_TSFTR + 4,
2164 (u32)((mac->tsf >> 32) & 0xffffffff));
2165 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
2166 if (btype_ibss == true)
2167 _rtl92cu_resume_tx_beacon(hw);
2168 break;
2169 }
2170 case HW_VAR_MGT_FILTER:
2171 rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
2172 break;
2173 case HW_VAR_CTRL_FILTER:
2174 rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
2175 break;
2176 case HW_VAR_DATA_FILTER:
2177 rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
2178 break;
2179 default:
2180 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
2181 "not process\n"));
2182 break;
2183 }
2184}
2185
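/* Build the rate bitmap from the basic rates and the MCS map, mask it
 * according to the wireless mode, RF type and short-GI capability, and
 * write the result to REG_ARFR0. */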
2186void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw)
2187{
2188 struct rtl_priv *rtlpriv = rtl_priv(hw);
2189 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2190 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2191 u32 ratr_value = (u32) mac->basic_rates;
2192 u8 *mcsrate = mac->mcs;
2193 u8 ratr_index = 0;
2194 u8 nmode = mac->ht_enable;
2195 u8 mimo_ps = 1;
2196 u16 shortgi_rate = 0;
2197 u32 tmp_ratr_value = 0;
2198 u8 curtxbw_40mhz = mac->bw_40;
2199 u8 curshortgi_40mhz = mac->sgi_40;
2200 u8 curshortgi_20mhz = mac->sgi_20;
2201 enum wireless_mode wirelessmode = mac->mode;
2202
2203 ratr_value |= ((*(u16 *) (mcsrate))) << 12;
2204 switch (wirelessmode) {
2205 case WIRELESS_MODE_B:
2206 if (ratr_value & 0x0000000c)
2207 ratr_value &= 0x0000000d;
2208 else
2209 ratr_value &= 0x0000000f;
2210 break;
2211 case WIRELESS_MODE_G:
2212 ratr_value &= 0x00000FF5;
2213 break;
2214 case WIRELESS_MODE_N_24G:
2215 case WIRELESS_MODE_N_5G:
2216 nmode = 1;
2217 if (mimo_ps == 0) {
2218 ratr_value &= 0x0007F005;
2219 } else {
2220 u32 ratr_mask;
2221
2222 if (get_rf_type(rtlphy) == RF_1T2R ||
2223 get_rf_type(rtlphy) == RF_1T1R)
2224 ratr_mask = 0x000ff005;
2225 else
2226 ratr_mask = 0x0f0ff005;
2227 if (curtxbw_40mhz)
2228 ratr_mask |= 0x00000010;
2229 ratr_value &= ratr_mask;
2230 }
2231 break;
2232 default:
2233 if (rtlphy->rf_type == RF_1T2R)
2234 ratr_value &= 0x000ff0ff;
2235 else
2236 ratr_value &= 0x0f0ff0ff;
2237 break;
2238 }
2239 ratr_value &= 0x0FFFFFFF;
2240 if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
2241 (!curtxbw_40mhz && curshortgi_20mhz))) {
2242 ratr_value |= 0x10000000;
2243 tmp_ratr_value = (ratr_value >> 12);
2244 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
2245 if ((1 << shortgi_rate) & tmp_ratr_value)
2246 break;
2247 }
2248 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
2249 (shortgi_rate << 4) | (shortgi_rate);
2250 }
2251 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
2252 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
2253 REG_ARFR0)));
2254}
2255
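/* Build the 5-byte H2C rate-mask command: derive the rate bitmap from the
 * basic rates and the MCS map, trim it by wireless mode, RF type, bandwidth
 * and RSSI level, then hand it to the firmware via rtl92c_fill_h2c_cmd(). */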
2256void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2257{
2258 struct rtl_priv *rtlpriv = rtl_priv(hw);
2259 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2260 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2261 u32 ratr_bitmap = (u32) mac->basic_rates;
2262 u8 *p_mcsrate = mac->mcs;
2263 u8 ratr_index = 0;
2264 u8 curtxbw_40mhz = mac->bw_40;
2265 u8 curshortgi_40mhz = mac->sgi_40;
2266 u8 curshortgi_20mhz = mac->sgi_20;
2267 enum wireless_mode wirelessmode = mac->mode;
2268 bool shortgi = false;
2269 u8 rate_mask[5];
2270 u8 macid = 0;
2271 u8 mimops = 1;
2272
2273 ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
2274 switch (wirelessmode) {
2275 case WIRELESS_MODE_B:
2276 ratr_index = RATR_INX_WIRELESS_B;
2277 if (ratr_bitmap & 0x0000000c)
2278 ratr_bitmap &= 0x0000000d;
2279 else
2280 ratr_bitmap &= 0x0000000f;
2281 break;
2282 case WIRELESS_MODE_G:
2283 ratr_index = RATR_INX_WIRELESS_GB;
2284 if (rssi_level == 1)
2285 ratr_bitmap &= 0x00000f00;
2286 else if (rssi_level == 2)
2287 ratr_bitmap &= 0x00000ff0;
2288 else
2289 ratr_bitmap &= 0x00000ff5;
2290 break;
2291 case WIRELESS_MODE_A:
2292 ratr_index = RATR_INX_WIRELESS_A;
2293 ratr_bitmap &= 0x00000ff0;
2294 break;
2295 case WIRELESS_MODE_N_24G:
2296 case WIRELESS_MODE_N_5G:
2297 ratr_index = RATR_INX_WIRELESS_NGB;
2298 if (mimops == 0) {
2299 if (rssi_level == 1)
2300 ratr_bitmap &= 0x00070000;
2301 else if (rssi_level == 2)
2302 ratr_bitmap &= 0x0007f000;
2303 else
2304 ratr_bitmap &= 0x0007f005;
2305 } else {
2306 if (rtlphy->rf_type == RF_1T2R ||
2307 rtlphy->rf_type == RF_1T1R) {
2308 if (curtxbw_40mhz) {
2309 if (rssi_level == 1)
2310 ratr_bitmap &= 0x000f0000;
2311 else if (rssi_level == 2)
2312 ratr_bitmap &= 0x000ff000;
2313 else
2314 ratr_bitmap &= 0x000ff015;
2315 } else {
2316 if (rssi_level == 1)
2317 ratr_bitmap &= 0x000f0000;
2318 else if (rssi_level == 2)
2319 ratr_bitmap &= 0x000ff000;
2320 else
2321 ratr_bitmap &= 0x000ff005;
2322 }
2323 } else {
2324 if (curtxbw_40mhz) {
2325 if (rssi_level == 1)
2326 ratr_bitmap &= 0x0f0f0000;
2327 else if (rssi_level == 2)
2328 ratr_bitmap &= 0x0f0ff000;
2329 else
2330 ratr_bitmap &= 0x0f0ff015;
2331 } else {
2332 if (rssi_level == 1)
2333 ratr_bitmap &= 0x0f0f0000;
2334 else if (rssi_level == 2)
2335 ratr_bitmap &= 0x0f0ff000;
2336 else
2337 ratr_bitmap &= 0x0f0ff005;
2338 }
2339 }
2340 }
2341 if ((curtxbw_40mhz && curshortgi_40mhz) ||
2342 (!curtxbw_40mhz && curshortgi_20mhz)) {
2343 if (macid == 0)
2344 shortgi = true;
2345 else if (macid == 1)
2346 shortgi = false;
2347 }
2348 break;
2349 default:
2350 ratr_index = RATR_INX_WIRELESS_NGB;
2351 if (rtlphy->rf_type == RF_1T2R)
2352 ratr_bitmap &= 0x000ff0ff;
2353 else
2354 ratr_bitmap &= 0x0f0ff0ff;
2355 break;
2356 }
2357 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n",
2358 ratr_bitmap));
2359 *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
2360 ratr_index << 28);
2361 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
2362 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
2363 "ratr_val:%x, %x:%x:%x:%x:%x\n",
2364 ratr_index, ratr_bitmap,
2365 rate_mask[0], rate_mask[1],
2366 rate_mask[2], rate_mask[3],
2367 rate_mask[4]));
2368 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
2369}
2370
2371void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
2372{
2373 struct rtl_priv *rtlpriv = rtl_priv(hw);
2374 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2375 u16 sifs_timer;
2376
2377 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2378 (u8 *)&mac->slot_time);
2379 if (!mac->ht_enable)
2380 sifs_timer = 0x0a0a;
2381 else
2382 sifs_timer = 0x0e0e;
2383 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2384}
2385
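/* Poll the hardware RF-kill state (REG_HSISR in power-down mode, otherwise a
 * GPIO line read through REG_GPIO_IO_SEL), update ppsc->hwradiooff and the
 * power-save level flags to match, and report whether the radio is on. */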
2386bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
2387{
2388 struct rtl_priv *rtlpriv = rtl_priv(hw);
2389 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2390 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2391 enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
2392 u8 u1tmp = 0;
2393 bool actuallyset = false;
2394 unsigned long flag = 0;
2395 /* to do - usb autosuspend */
2396 u8 usb_autosuspend = 0;
2397
2398 if (ppsc->swrf_processing)
2399 return false;
2400 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2401 if (ppsc->rfchange_inprogress) {
2402 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2403 return false;
2404 } else {
2405 ppsc->rfchange_inprogress = true;
2406 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2407 }
2408 cur_rfstate = ppsc->rfpwr_state;
2409 if (usb_autosuspend) {
2410 /* to do................... */
2411 } else {
2412 if (ppsc->pwrdown_mode) {
2413 u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
2414 e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
2415 ERFOFF : ERFON;
2416 RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
2417 ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp));
2418 } else {
2419 rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
2420 rtl_read_byte(rtlpriv,
2421 REG_MAC_PINMUX_CFG) & ~(BIT(3)));
2422 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
2423 e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
2424 ERFON : ERFOFF;
2425 RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
2426 ("GPIO_IN=%02x\n", u1tmp));
2427 }
2428 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n",
2429 e_rfpowerstate_toset));
2430 }
2431 if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
2432 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW "
2433 "Radio ON, RF ON\n"));
2434 ppsc->hwradiooff = false;
2435 actuallyset = true;
2436 } else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset ==
2437 ERFOFF)) {
2438 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW"
2439 " Radio OFF\n"));
2440 ppsc->hwradiooff = true;
2441 actuallyset = true;
2442 } else {
2443 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
2444 ("pHalData->bHwRadioOff and eRfPowerStateToSet do not"
2445 " match: pHalData->bHwRadioOff %x, eRfPowerStateToSet "
2446 "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
2447 }
2448 if (actuallyset) {
2449 ppsc->hwradiooff = 1;
2450 if (e_rfpowerstate_toset == ERFON) {
2451 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
2452 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
2453 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
2454 else if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
2455 && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3))
2456 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
2457 }
2458 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2459 ppsc->rfchange_inprogress = false;
2460 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2461		/* For the power-down module, we need to enable the register
2462		 * block control register at 0x1c, then set the power-down
2463		 * control bits of register 0x04 (BIT4 and BIT15) to 1.
2464 */
2465 if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
2466 /* Enable register area 0x0-0xc. */
2467 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
2468 if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
2469 /*
2470				 * We should configure the HW PDn source for
2471				 * WiFi ONLY; the HW will then enter power-down
2472				 * mode once the PDn sources of all functions
2473				 * have been configured.
2474 */
2475 u1tmp = rtl_read_byte(rtlpriv,
2476 REG_MULTI_FUNC_CTRL);
2477 rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
2478 (u1tmp|WL_HWPDN_EN));
2479 } else {
2480 rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
2481 }
2482 }
2483 if (e_rfpowerstate_toset == ERFOFF) {
2484 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
2485 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
2486 else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
2487 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
2488 }
2489 } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
2490 /* Enter D3 or ASPM after GPIO had been done. */
2491 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM)
2492 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
2493 else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3)
2494 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3);
2495 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2496 ppsc->rfchange_inprogress = false;
2497 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2498 } else {
2499 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2500 ppsc->rfchange_inprogress = false;
2501 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2502 }
2503 *valid = 1;
2504 return !ppsc->hwradiooff;
2505}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
new file mode 100644
index 00000000000..3c0ea5ea6db
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -0,0 +1,107 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CU_HW_H__
31#define __RTL92CU_HW_H__
32
33#define LLT_POLLING_LLT_THRESHOLD 20
34#define LLT_POLLING_READY_TIMEOUT_COUNT 100
35#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
36
37#define RX_PAGE_SIZE_REG_VALUE PBP_128
38/* Note: We divide the number of pages equally among the queues
39 * other than the public queue. */
40#define TX_TOTAL_PAGE_NUMBER 0xF8
41#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1)
42
43
44#define CHIP_B_PAGE_NUM_PUBQ 0xE7
45
46/* For Test Chip Setting
47 * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */
48#define CHIP_A_PAGE_NUM_PUBQ 0x7E
49
50
51/* For Chip A Setting */
52#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5
53#define WMM_CHIP_A_TX_PAGE_BOUNDARY \
54 (WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
55
56#define WMM_CHIP_A_PAGE_NUM_PUBQ 0xA3
57#define WMM_CHIP_A_PAGE_NUM_HPQ 0x29
58#define WMM_CHIP_A_PAGE_NUM_LPQ 0x29
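/* PUBQ + HPQ + LPQ = 0xA3 + 0x29 + 0x29 = 0xF5
 * = WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER */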
59
60
61
62/* Note: For Chip B Setting, modify later */
63#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5
64#define WMM_CHIP_B_TX_PAGE_BOUNDARY \
65 (WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */
66
67#define WMM_CHIP_B_PAGE_NUM_PUBQ 0xB0
68#define WMM_CHIP_B_PAGE_NUM_HPQ 0x29
69#define WMM_CHIP_B_PAGE_NUM_LPQ 0x1C
70#define WMM_CHIP_B_PAGE_NUM_NPQ 0x1C
71
72#define BOARD_TYPE_NORMAL_MASK 0xE0
73#define BOARD_TYPE_TEST_MASK 0x0F
74
75/* should be renamed and moved to another file */
76enum _BOARD_TYPE_8192CUSB {
77 BOARD_USB_DONGLE = 0, /* USB dongle */
78 BOARD_USB_High_PA = 1, /* USB dongle - high power PA */
79 BOARD_MINICARD = 2, /* Minicard */
80 BOARD_USB_SOLO = 3, /* USB solo-Slim module */
81 BOARD_USB_COMBO = 4, /* USB Combo-Slim module */
82};
83
84#define IS_HIGHT_PA(boardtype) \
85 ((boardtype == BOARD_USB_High_PA) ? true : false)
86
87#define RTL92C_DRIVER_INFO_SIZE 4
88void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw);
89void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw);
90int rtl92cu_hw_init(struct ieee80211_hw *hw);
91void rtl92cu_card_disable(struct ieee80211_hw *hw);
92int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
93void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw);
94void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw);
95void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
96 u32 add_msr, u32 rm_msr);
97void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
98void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
99void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw);
100void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
101
102void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
103bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
104void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
105u8 _rtl92c_get_chnl_group(u8 chnl);
106
107#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
new file mode 100644
index 00000000000..332c74348a6
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
@@ -0,0 +1,142 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
28#include "../wifi.h"
29#include "../usb.h"
30#include "reg.h"
31#include "led.h"
32
33static void _rtl92cu_init_led(struct ieee80211_hw *hw,
34 struct rtl_led *pled, enum rtl_led_pin ledpin)
35{
36 pled->hw = hw;
37 pled->ledpin = ledpin;
38 pled->ledon = false;
39}
40
41static void _rtl92cu_deInit_led(struct rtl_led *pled)
42{
43}
44
45void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
46{
47 u8 ledcfg;
48 struct rtl_priv *rtlpriv = rtl_priv(hw);
49
50 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
51 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
52 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
53 switch (pled->ledpin) {
54 case LED_PIN_GPIO0:
55 break;
56 case LED_PIN_LED0:
57 rtl_write_byte(rtlpriv,
58 REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
59 break;
60 case LED_PIN_LED1:
61 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
62 break;
63 default:
64 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
65 ("switch case not process\n"));
66 break;
67 }
68 pled->ledon = true;
69}
70
71void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
72{
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
75 u8 ledcfg;
76
77 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
78 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
79 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
80 switch (pled->ledpin) {
81 case LED_PIN_GPIO0:
82 break;
83 case LED_PIN_LED0:
84 ledcfg &= 0xf0;
85 if (usbpriv->ledctl.led_opendrain == true)
86 rtl_write_byte(rtlpriv, REG_LEDCFG2,
87 (ledcfg | BIT(1) | BIT(5) | BIT(6)));
88 else
89 rtl_write_byte(rtlpriv, REG_LEDCFG2,
90 (ledcfg | BIT(3) | BIT(5) | BIT(6)));
91 break;
92 case LED_PIN_LED1:
93 ledcfg &= 0x0f;
94 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
95 break;
96 default:
97 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
98 ("switch case not process\n"));
99 break;
100 }
101 pled->ledon = false;
102}
103
104void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
105{
106 struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
107 _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
108 _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
109}
110
111void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
112{
113 struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
114 _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
115 _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
116}
117
118static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
119 enum led_ctl_mode ledaction)
120{
121}
122
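/* Ignore the listed LED actions while the RF has been turned off for a
 * reason other than power save; everything else is passed to the software
 * LED control stub above. */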
123void rtl92cu_led_control(struct ieee80211_hw *hw,
124 enum led_ctl_mode ledaction)
125{
126 struct rtl_priv *rtlpriv = rtl_priv(hw);
127 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
128
129 if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
130 (ledaction == LED_CTL_TX ||
131 ledaction == LED_CTL_RX ||
132 ledaction == LED_CTL_SITE_SURVEY ||
133 ledaction == LED_CTL_LINK ||
134 ledaction == LED_CTL_NO_LINK ||
135 ledaction == LED_CTL_START_TO_LINK ||
136 ledaction == LED_CTL_POWER_ON)) {
137 return;
138 }
139 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
140 ledaction));
141 _rtl92cu_sw_led_control(hw, ledaction);
142}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
new file mode 100644
index 00000000000..decaee4d1eb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h
@@ -0,0 +1,37 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
28#ifndef __RTL92CU_LED_H__
29#define __RTL92CU_LED_H__
30
31void rtl92cu_init_sw_leds(struct ieee80211_hw *hw);
32void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw);
33void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
34void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
35void rtl92cu_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
36
37#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
new file mode 100644
index 00000000000..f8514cba17b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -0,0 +1,1144 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28****************************************************************************/
29#include <linux/module.h>
30
31#include "../wifi.h"
32#include "../pci.h"
33#include "../usb.h"
34#include "../ps.h"
35#include "../cam.h"
36#include "reg.h"
37#include "def.h"
38#include "phy.h"
39#include "rf.h"
40#include "dm.h"
41#include "mac.h"
42#include "trx.h"
43
44/* macro to shorten lines */
45
46#define LINK_Q ui_link_quality
47#define RX_EVM rx_evm_percentage
48#define RX_SIGQ rx_mimo_signalquality
49
50
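/* Decode REG_SYS_CFG (plus REG_HPON_FSM or REG_GPIO_OUTSTS where needed)
 * into an enum version_8192c value, then derive the RF type and the enabled
 * RX paths from it. */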
51void rtl92c_read_chip_version(struct ieee80211_hw *hw)
52{
53 struct rtl_priv *rtlpriv = rtl_priv(hw);
54 struct rtl_phy *rtlphy = &(rtlpriv->phy);
55 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
56 enum version_8192c chip_version = VERSION_UNKNOWN;
57 u32 value32;
58
59 value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
60 if (value32 & TRP_VAUX_EN) {
61 chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
62 VERSION_TEST_CHIP_88C;
63 } else {
64 /* Normal mass production chip. */
65 chip_version = NORMAL_CHIP;
66 chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
67 chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
68 /* RTL8723 with BT function. */
69 chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
70 if (IS_VENDOR_UMC(chip_version))
71 chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
72 CHIP_VENDOR_UMC_B_CUT : 0);
73 if (IS_92C_SERIAL(chip_version)) {
74 value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
75 chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
76 CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
77 } else if (IS_8723_SERIES(chip_version)) {
78 value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
79 chip_version |= ((value32 & RF_RL_ID) ?
80 CHIP_8723_DRV_REV : 0);
81 }
82 }
83 rtlhal->version = (enum version_8192c)chip_version;
84 switch (rtlhal->version) {
85 case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
86 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
87 ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
88 break;
89 case VERSION_NORMAL_TSMC_CHIP_92C:
90 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
91 ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
92 break;
93 case VERSION_NORMAL_TSMC_CHIP_88C:
94 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
95 ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
96 break;
97 case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
98 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
99			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
100 "92C_1T2R_A_CUT.\n"));
101 break;
102 case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
103 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
104 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
105 "92C_A_CUT.\n"));
106 break;
107 case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
108 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
109 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
110 "_88C_A_CUT.\n"));
111 break;
112 case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
113 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
114 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
115 "_92C_1T2R_B_CUT.\n"));
116 break;
117 case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
118 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
119 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
120 "_92C_B_CUT.\n"));
121 break;
122 case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
123 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
124 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
125 "_88C_B_CUT.\n"));
126 break;
127 case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
128 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
129 ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
130 "_8723_1T1R_A_CUT.\n"));
131 break;
132 case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
133 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
134 ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
135 "_8723_1T1R_B_CUT.\n"));
136 break;
137 case VERSION_TEST_CHIP_92C:
138 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
139 ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
140 break;
141 case VERSION_TEST_CHIP_88C:
142 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
143 ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
144 break;
145 default:
146 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
147 ("Chip Version ID: ???????????????.\n"));
148 break;
149 }
150 if (IS_92C_SERIAL(rtlhal->version))
151 rtlphy->rf_type =
152 (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
153 else
154 rtlphy->rf_type = RF_1T1R;
155 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
156 ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
157 "RF_2T2R" : "RF_1T1R"));
158 if (get_rf_type(rtlphy) == RF_1T1R)
159 rtlpriv->dm.rfpath_rxenable[0] = true;
160 else
161 rtlpriv->dm.rfpath_rxenable[0] =
162 rtlpriv->dm.rfpath_rxenable[1] = true;
163 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
164 rtlhal->version));
165}
166
167/**
168 * rtl92c_llt_write - LLT table write access
169 * @hw: ieee80211 hardware context
170 * @address: LLT logical address.
171 * @data: LLT data content
172 *
173 * Realtek hardware access function.
174 *
175 */
176bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
177{
178 struct rtl_priv *rtlpriv = rtl_priv(hw);
179 bool status = true;
180 long count = 0;
181 u32 value = _LLT_INIT_ADDR(address) |
182 _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
183
184 rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
185 do {
186 value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
187 if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
188 break;
189 if (count > POLLING_LLT_THRESHOLD) {
190 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
191				 ("Failed to poll for LLT write completion"
192				 " at address %d! _LLT_OP_VALUE(%x)\n",
193 address, _LLT_OP_VALUE(value)));
194 status = false;
195 break;
196 }
197 } while (++count);
198 return status;
199}
200/**
201 * rtl92c_init_llt_table - Init LLT table
202 * @hw: ieee80211 hardware context
203 * @boundary: page index at which the ring-buffer area starts
204 *
205 * Realtek hardware access function.
206 *
207 */
208bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
209{
210 bool rst = true;
211 u32 i;
212
213 for (i = 0; i < (boundary - 1); i++) {
214 rst = rtl92c_llt_write(hw, i , i + 1);
215 if (true != rst) {
216 printk(KERN_ERR "===> %s #1 fail\n", __func__);
217 return rst;
218 }
219 }
220 /* end of list */
221 rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
222 if (true != rst) {
223 printk(KERN_ERR "===> %s #2 fail\n", __func__);
224 return rst;
225 }
226	/* Make the remaining pages into a ring buffer.
227	 * This ring buffer is used as the beacon buffer if we configure
228	 * this MAC for two-MAC transfer; otherwise it is used as a local
229	 * loopback buffer.
230 */
231 for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
232 rst = rtl92c_llt_write(hw, i, (i + 1));
233 if (true != rst) {
234 printk(KERN_ERR "===> %s #3 fail\n", __func__);
235 return rst;
236 }
237 }
238 /* Let last entry point to the start entry of ring buffer */
239 rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
240 if (true != rst) {
241 printk(KERN_ERR "===> %s #4 fail\n", __func__);
242 return rst;
243 }
244 return rst;
245}
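/* Program the hardware CAM: either wipe the first few entries (clear_all),
 * delete a key whose length has dropped to zero, or add a pairwise/group/WEP
 * key at an entry id chosen from the key index and station address. */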
246void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
247 u8 *p_macaddr, bool is_group, u8 enc_algo,
248 bool is_wepkey, bool clear_all)
249{
250 struct rtl_priv *rtlpriv = rtl_priv(hw);
251 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
252 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
253 u8 *macaddr = p_macaddr;
254 u32 entry_id = 0;
255 bool is_pairwise = false;
256 static u8 cam_const_addr[4][6] = {
257 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
258 {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
259 {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
260 {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
261 };
262 static u8 cam_const_broad[] = {
263 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
264 };
265
266 if (clear_all) {
267 u8 idx = 0;
268 u8 cam_offset = 0;
269 u8 clear_number = 5;
270
271 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
272 for (idx = 0; idx < clear_number; idx++) {
273 rtl_cam_mark_invalid(hw, cam_offset + idx);
274 rtl_cam_empty_entry(hw, cam_offset + idx);
275 if (idx < 5) {
276 memset(rtlpriv->sec.key_buf[idx], 0,
277 MAX_KEY_LEN);
278 rtlpriv->sec.key_len[idx] = 0;
279 }
280 }
281 } else {
282 switch (enc_algo) {
283 case WEP40_ENCRYPTION:
284 enc_algo = CAM_WEP40;
285 break;
286 case WEP104_ENCRYPTION:
287 enc_algo = CAM_WEP104;
288 break;
289 case TKIP_ENCRYPTION:
290 enc_algo = CAM_TKIP;
291 break;
292 case AESCCMP_ENCRYPTION:
293 enc_algo = CAM_AES;
294 break;
295 default:
296 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
297				 ("illegal switch case\n"));
298 enc_algo = CAM_TKIP;
299 break;
300 }
301 if (is_wepkey || rtlpriv->sec.use_defaultkey) {
302 macaddr = cam_const_addr[key_index];
303 entry_id = key_index;
304 } else {
305 if (is_group) {
306 macaddr = cam_const_broad;
307 entry_id = key_index;
308 } else {
309 key_index = PAIRWISE_KEYIDX;
310 entry_id = CAM_PAIRWISE_KEY_POSITION;
311 is_pairwise = true;
312 }
313 }
314 if (rtlpriv->sec.key_len[key_index] == 0) {
315 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
316 ("delete one entry\n"));
317 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
318 } else {
319 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
320 ("The insert KEY length is %d\n",
321 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
322 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
323 ("The insert KEY is %x %x\n",
324 rtlpriv->sec.key_buf[0][0],
325 rtlpriv->sec.key_buf[0][1]));
326 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
327 ("add one entry\n"));
328 if (is_pairwise) {
329 RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
330 "Pairwiase Key content :",
331 rtlpriv->sec.pairwise_key,
332 rtlpriv->sec.
333 key_len[PAIRWISE_KEYIDX]);
334 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
335 ("set Pairwiase key\n"));
336
337 rtl_cam_add_one_entry(hw, macaddr, key_index,
338 entry_id, enc_algo,
339 CAM_CONFIG_NO_USEDK,
340 rtlpriv->sec.
341 key_buf[key_index]);
342 } else {
343 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
344 ("set group key\n"));
345 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
346 rtl_cam_add_one_entry(hw,
347 rtlefuse->dev_addr,
348 PAIRWISE_KEYIDX,
349 CAM_PAIRWISE_KEY_POSITION,
350 enc_algo,
351 CAM_CONFIG_NO_USEDK,
352 rtlpriv->sec.key_buf
353 [entry_id]);
354 }
355 rtl_cam_add_one_entry(hw, macaddr, key_index,
356 entry_id, enc_algo,
357 CAM_CONFIG_NO_USEDK,
358 rtlpriv->sec.key_buf[entry_id]);
359 }
360 }
361 }
362}
363
364u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
365{
366 struct rtl_priv *rtlpriv = rtl_priv(hw);
367
368 return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS);
369}
370
371void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
372{
373 struct rtl_priv *rtlpriv = rtl_priv(hw);
374 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
375 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
376 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
377
378 if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
379 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
380 0xFFFFFFFF);
381 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
382 0xFFFFFFFF);
383 rtlpci->irq_enabled = true;
384 } else {
385 rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
386 0xFFFFFFFF);
387 rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
388 0xFFFFFFFF);
389 rtlusb->irq_enabled = true;
390 }
391}
392
393void rtl92c_init_interrupt(struct ieee80211_hw *hw)
394{
395 rtl92c_enable_interrupt(hw);
396}
397
398void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
399{
400 struct rtl_priv *rtlpriv = rtl_priv(hw);
401 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
402 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
403 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
404
405 rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
406 rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
407 if (IS_HARDWARE_TYPE_8192CE(rtlhal))
408 rtlpci->irq_enabled = false;
409 else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
410 rtlusb->irq_enabled = false;
411}
412
413void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
414{
415 struct rtl_priv *rtlpriv = rtl_priv(hw);
416 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
417 u32 u4b_ac_param;
418
419 rtl92c_dm_init_edca_turbo(hw);
420 u4b_ac_param = (u32) mac->ac[aci].aifs;
421 u4b_ac_param |=
422 ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
423 AC_PARAM_ECW_MIN_OFFSET;
424 u4b_ac_param |=
425 ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
426 AC_PARAM_ECW_MAX_OFFSET;
427 u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
428 AC_PARAM_TXOP_OFFSET;
429 RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
430 ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
431 switch (aci) {
432 case AC1_BK:
433 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
434 break;
435 case AC0_BE:
436 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
437 break;
438 case AC2_VI:
439 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
440 break;
441 case AC3_VO:
442 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
443 break;
444 default:
445 RT_ASSERT(false, ("invalid aci: %d !\n", aci));
446 break;
447 }
448}
449
450/*-------------------------------------------------------------------------
451 * HW MAC Address
452 *-------------------------------------------------------------------------*/
453void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
454{
455 u32 i;
456 struct rtl_priv *rtlpriv = rtl_priv(hw);
457
458 for (i = 0 ; i < ETH_ALEN ; i++)
459 rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
460
461 RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
462 "%02X-%02X-%02X\n",
463 rtl_read_byte(rtlpriv, REG_MACID),
464 rtl_read_byte(rtlpriv, REG_MACID+1),
465 rtl_read_byte(rtlpriv, REG_MACID+2),
466 rtl_read_byte(rtlpriv, REG_MACID+3),
467 rtl_read_byte(rtlpriv, REG_MACID+4),
468 rtl_read_byte(rtlpriv, REG_MACID+5)));
469}
470
471void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
472{
473 struct rtl_priv *rtlpriv = rtl_priv(hw);
474 rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
475}
476
477int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
478{
479 u8 value;
480 struct rtl_priv *rtlpriv = rtl_priv(hw);
481
482 switch (type) {
483 case NL80211_IFTYPE_UNSPECIFIED:
484 value = NT_NO_LINK;
485 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
486 ("Set Network type to NO LINK!\n"));
487 break;
488 case NL80211_IFTYPE_ADHOC:
489 value = NT_LINK_AD_HOC;
490 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
491 ("Set Network type to Ad Hoc!\n"));
492 break;
493 case NL80211_IFTYPE_STATION:
494 value = NT_LINK_AP;
495 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
496 ("Set Network type to STA!\n"));
497 break;
498 case NL80211_IFTYPE_AP:
499 value = NT_AS_AP;
500 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
501 ("Set Network type to AP!\n"));
502 break;
503 default:
504 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
505 ("Network type %d not support!\n", type));
506 return -EOPNOTSUPP;
507 }
508 rtl_write_byte(rtlpriv, (REG_CR + 2), value);
509 return 0;
510}
511
512void rtl92c_init_network_type(struct ieee80211_hw *hw)
513{
514 rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
515}
516
517void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
518{
519 u16 value16;
520 u32 value32;
521 struct rtl_priv *rtlpriv = rtl_priv(hw);
522
523 /* Response Rate Set */
524 value32 = rtl_read_dword(rtlpriv, REG_RRSR);
525 value32 &= ~RATE_BITMAP_ALL;
526 value32 |= RATE_RRSR_CCK_ONLY_1M;
527 rtl_write_dword(rtlpriv, REG_RRSR, value32);
528 /* SIFS (used in NAV) */
529 value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
530 rtl_write_word(rtlpriv, REG_SPEC_SIFS, value16);
531 /* Retry Limit */
532 value16 = _LRL(0x30) | _SRL(0x30);
533 rtl_write_dword(rtlpriv, REG_RL, value16);
534}
535
536void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
537{
538 struct rtl_priv *rtlpriv = rtl_priv(hw);
539
540 /* Set Data Auto Rate Fallback Retry Count register. */
541 rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
542 rtl_write_dword(rtlpriv, REG_DARFRC+4, 0x10080404);
543 rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
544 rtl_write_dword(rtlpriv, REG_RARFRC+4, 0x08070605);
545}
546
547static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
548 u8 ctx_sifs)
549{
550 struct rtl_priv *rtlpriv = rtl_priv(hw);
551
552 rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs);
553 rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs);
554}
555
556static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
557 u8 ctx_sifs)
558{
559 struct rtl_priv *rtlpriv = rtl_priv(hw);
560
561 rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs);
562 rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs);
563}
564
565void rtl92c_init_edca_param(struct ieee80211_hw *hw,
566 u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
567{
568 /* sequence: VO, VI, BE, BK ==> the same as 92C hardware design.
569 * reference: enum nl80211_txq_q or the ieee80211_set_wmm_default function.
570 */
571 u32 value;
572 struct rtl_priv *rtlpriv = rtl_priv(hw);
573
574 value = (u32)aifs;
575 value |= ((u32)cw_min & 0xF) << 8;
576 value |= ((u32)cw_max & 0xF) << 12;
577 value |= (u32)txop << 16;
578 /* 92C hardware register sequence is the same as queue number. */
579 rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
580}
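This packing (AIFS in bits 0-7, ECWmin in bits 8-11, ECWmax in bits 12-15, TXOP from bit 16 up) also decodes the fixed defaults that rtl92c_init_edca() writes below: 0x005EA42B for BE, for instance, is TXOP 0x5E, ECWmax 0xA, ECWmin 4 and an AIFS byte of 0x2B. A small sketch of the same arithmetic, using only the field layout visible in this function:

#include <stdio.h>

/* Pack an EDCA parameter dword the same way rtl92c_init_edca_param() does. */
static unsigned int edca_pack(unsigned char aifs, unsigned char ecw_min,
			      unsigned char ecw_max, unsigned short txop)
{
	unsigned int value = aifs;

	value |= (unsigned int)(ecw_min & 0xF) << 8;
	value |= (unsigned int)(ecw_max & 0xF) << 12;
	value |= (unsigned int)txop << 16;
	return value;
}

int main(void)
{
	unsigned int reg = 0x005EA42B;	/* BE default from rtl92c_init_edca() */

	printf("AIFS=0x%02x ECWmin=%u ECWmax=%u TXOP=0x%04x\n",
	       reg & 0xFF, (reg >> 8) & 0xF, (reg >> 12) & 0xF, reg >> 16);
	printf("repacked: 0x%08x\n", edca_pack(0x2B, 4, 0xA, 0x5E));
	return 0;
}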
581
582void rtl92c_init_edca(struct ieee80211_hw *hw)
583{
584 u16 value16;
585 struct rtl_priv *rtlpriv = rtl_priv(hw);
586
587 /* disable EDCCA count down, to reduce collisions and retries */
588 value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
589 value16 |= DIS_EDCA_CNT_DWN;
590 rtl_write_word(rtlpriv, REG_RD_CTRL, value16);
591 /* Update SIFS timing; the vendor driver programs the equivalent of
592 * pHalData->SifsTime = 0x0e0e0a0a. */
593 rtl92c_set_cck_sifs(hw, 0xa, 0xa);
594 rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);
595 /* Set CCK/OFDM SIFS to be 10us. */
596 rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
597 rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);
598 rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
599 rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
600 /* TXOP */
601 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
602 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
603 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
604 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
605 /* PIFS */
606 rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
607 /* AGGR BREAK TIME Register */
608 rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
609 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
610 rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
611 rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
612}
613
614void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
615{
616 struct rtl_priv *rtlpriv = rtl_priv(hw);
617
618 rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
619 rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
620 /* init the AMPDU aggregation number, tuned for Tx throughput */
621 rtl_write_word(rtlpriv, 0x4CA, 0x0708);
622}
623
624void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
625{
626 struct rtl_priv *rtlpriv = rtl_priv(hw);
627
628 rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
629}
630
631void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
632{
633 struct rtl_priv *rtlpriv = rtl_priv(hw);
634
635 rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
636 rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
637 rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
638}
639
640void rtl92c_init_retry_function(struct ieee80211_hw *hw)
641{
642 u8 value8;
643 struct rtl_priv *rtlpriv = rtl_priv(hw);
644
645 value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
646 value8 |= EN_AMPDU_RTY_NEW;
647 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);
648 /* Set ACK timeout */
649 rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
650}
651
652void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
653 enum version_8192c version)
654{
655 struct rtl_priv *rtlpriv = rtl_priv(hw);
656 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
657
658 rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
659 rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
660 rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
661 if (IS_NORMAL_CHIP(rtlhal->version))
662 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
663 else
664 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
665}
666
667void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
668{
669 struct rtl_priv *rtlpriv = rtl_priv(hw);
670
671 rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
672}
673
674void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
675{
676 struct rtl_priv *rtlpriv = rtl_priv(hw);
677 u8 value = is2T ? MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T;
678
679 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
680}
681
682u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
683{
684 struct rtl_priv *rtlpriv = rtl_priv(hw);
685
686 return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
687}
688
689void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
690{
691 struct rtl_priv *rtlpriv = rtl_priv(hw);
692
693 rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
694}
695
696u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
697{
698 struct rtl_priv *rtlpriv = rtl_priv(hw);
699
700 return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
701}
702
703void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
704{
705 struct rtl_priv *rtlpriv = rtl_priv(hw);
706
707 rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
708}
709
710u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
711{
712 struct rtl_priv *rtlpriv = rtl_priv(hw);
713
714 return rtl_read_word(rtlpriv, REG_RXFLTMAP2);
715}
716
717void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
718{
719 struct rtl_priv *rtlpriv = rtl_priv(hw);
720
721 rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
722}
723/*==============================================================*/
724
725static u8 _rtl92c_query_rxpwrpercentage(char antpower)
726{
727 if ((antpower <= -100) || (antpower >= 20))
728 return 0;
729 else if (antpower >= 0)
730 return 100;
731 else
732 return 100 + antpower;
733}
734
735static u8 _rtl92c_evm_db_to_percentage(char value)
736{
737 char ret_val;
738
739 ret_val = value;
740 if (ret_val >= 0)
741 ret_val = 0;
742 if (ret_val <= -33)
743 ret_val = -33;
744 ret_val = 0 - ret_val;
745 ret_val *= 3;
746 if (ret_val == 99)
747 ret_val = 100;
748 return ret_val;
749}
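Both helpers above are clamped linear maps: antenna power in [-100, 0] dBm becomes 0-100 percent (for example, -55 dBm reports 45), and EVM in [-33, 0] dB becomes 0-99 percent with 99 rounded up to 100 (so -25 dB reports 75 and anything at or below -33 dB reports 100). A standalone check of the same arithmetic:

#include <stdio.h>

static unsigned char pwr_percent(signed char antpower)
{
	if (antpower <= -100 || antpower >= 20)
		return 0;
	if (antpower >= 0)
		return 100;
	return 100 + antpower;		/* -55 dBm -> 45 % */
}

static unsigned char evm_percent(signed char evm_db)
{
	if (evm_db >= 0)
		evm_db = 0;
	if (evm_db <= -33)
		evm_db = -33;
	return (-3 * evm_db == 99) ? 100 : -3 * evm_db;	/* -25 dB -> 75 % */
}

int main(void)
{
	printf("%u %u %u\n", pwr_percent(-55), evm_percent(-25), evm_percent(-40));
	return 0;
}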
750
751static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
752 u8 signal_strength_index)
753{
754 long signal_power;
755
756 signal_power = (long)((signal_strength_index + 1) >> 1);
757 signal_power -= 95;
758 return signal_power;
759}
760
761static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
762 long currsig)
763{
764 long retsig;
765
766 if (currsig >= 61 && currsig <= 100)
767 retsig = 90 + ((currsig - 60) / 4);
768 else if (currsig >= 41 && currsig <= 60)
769 retsig = 78 + ((currsig - 40) / 2);
770 else if (currsig >= 31 && currsig <= 40)
771 retsig = 66 + (currsig - 30);
772 else if (currsig >= 21 && currsig <= 30)
773 retsig = 54 + (currsig - 20);
774 else if (currsig >= 5 && currsig <= 20)
775 retsig = 42 + (((currsig - 5) * 2) / 3);
776 else if (currsig == 4)
777 retsig = 36;
778 else if (currsig == 3)
779 retsig = 27;
780 else if (currsig == 2)
781 retsig = 18;
782 else if (currsig == 1)
783 retsig = 9;
784 else
785 retsig = currsig;
786 return retsig;
787}
788
789static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
790 struct rtl_stats *pstats,
791 struct rx_desc_92c *pdesc,
792 struct rx_fwinfo_92c *p_drvinfo,
793 bool packet_match_bssid,
794 bool packet_toself,
795 bool packet_beacon)
796{
797 struct rtl_priv *rtlpriv = rtl_priv(hw);
798 struct rtl_phy *rtlphy = &(rtlpriv->phy);
799 struct phy_sts_cck_8192s_t *cck_buf;
800 s8 rx_pwr_all = 0, rx_pwr[4];
801 u8 rf_rx_num = 0, evm, pwdb_all;
802 u8 i, max_spatial_stream;
803 u32 rssi, total_rssi = 0;
804 bool in_powersavemode = false;
805 bool is_cck_rate;
806
807 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
808 pstats->packet_matchbssid = packet_match_bssid;
809 pstats->packet_toself = packet_toself;
810 pstats->is_cck = is_cck_rate;
811 pstats->packet_beacon = packet_beacon;
813 pstats->RX_SIGQ[0] = -1;
814 pstats->RX_SIGQ[1] = -1;
815 if (is_cck_rate) {
816 u8 report, cck_highpwr;
817 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
818 if (!in_powersavemode)
819 cck_highpwr = rtlphy->cck_high_power;
820 else
821 cck_highpwr = false;
822 if (!cck_highpwr) {
823 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
824 report = cck_buf->cck_agc_rpt & 0xc0;
825 report = report >> 6;
826 switch (report) {
827 case 0x3:
828 rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
829 break;
830 case 0x2:
831 rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
832 break;
833 case 0x1:
834 rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
835 break;
836 case 0x0:
837 rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
838 break;
839 }
840 } else {
841 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
842 report = p_drvinfo->cfosho[0] & 0x60;
843 report = report >> 5;
844 switch (report) {
845 case 0x3:
846 rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
847 break;
848 case 0x2:
849 rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
850 break;
851 case 0x1:
852 rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
853 break;
854 case 0x0:
855 rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
856 break;
857 }
858 }
859 pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
860 pstats->rx_pwdb_all = pwdb_all;
861 pstats->recvsignalpower = rx_pwr_all;
862 if (packet_match_bssid) {
863 u8 sq;
864 if (pstats->rx_pwdb_all > 40)
865 sq = 100;
866 else {
867 sq = cck_buf->sq_rpt;
868 if (sq > 64)
869 sq = 0;
870 else if (sq < 20)
871 sq = 100;
872 else
873 sq = ((64 - sq) * 100) / 44;
874 }
875 pstats->signalquality = sq;
876 pstats->RX_SIGQ[0] = sq;
877 pstats->RX_SIGQ[1] = -1;
878 }
879 } else {
880 rtlpriv->dm.rfpath_rxenable[0] =
881 rtlpriv->dm.rfpath_rxenable[1] = true;
882 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
883 if (rtlpriv->dm.rfpath_rxenable[i])
884 rf_rx_num++;
885 rx_pwr[i] =
886 ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
887 rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
888 total_rssi += rssi;
889 rtlpriv->stats.rx_snr_db[i] =
890 (long)(p_drvinfo->rxsnr[i] / 2);
891
892 if (packet_match_bssid)
893 pstats->rx_mimo_signalstrength[i] = (u8) rssi;
894 }
895 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
896 pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
897 pstats->rx_pwdb_all = pwdb_all;
898 pstats->rxpower = rx_pwr_all;
899 pstats->recvsignalpower = rx_pwr_all;
900 if (GET_RX_DESC_RX_MCS(pdesc) &&
901 GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
902 GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
903 max_spatial_stream = 2;
904 else
905 max_spatial_stream = 1;
906 for (i = 0; i < max_spatial_stream; i++) {
907 evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
908 if (packet_match_bssid) {
909 if (i == 0)
910 pstats->signalquality =
911 (u8) (evm & 0xff);
912 pstats->RX_SIGQ[i] =
913 (u8) (evm & 0xff);
914 }
915 }
916 }
917 if (is_cck_rate)
918 pstats->signalstrength =
919 (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
920 else if (rf_rx_num != 0)
921 pstats->signalstrength =
922 (u8) (_rtl92c_signal_scale_mapping
923 (hw, total_rssi /= rf_rx_num));
924}
925
926static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
927 struct rtl_stats *pstats)
928{
929 struct rtl_priv *rtlpriv = rtl_priv(hw);
930 struct rtl_phy *rtlphy = &(rtlpriv->phy);
931 u8 rfpath;
932 u32 last_rssi, tmpval;
933
934 if (pstats->packet_toself || pstats->packet_beacon) {
935 rtlpriv->stats.rssi_calculate_cnt++;
936 if (rtlpriv->stats.ui_rssi.total_num++ >=
937 PHY_RSSI_SLID_WIN_MAX) {
938 rtlpriv->stats.ui_rssi.total_num =
939 PHY_RSSI_SLID_WIN_MAX;
940 last_rssi =
941 rtlpriv->stats.ui_rssi.elements[rtlpriv->
942 stats.ui_rssi.index];
943 rtlpriv->stats.ui_rssi.total_val -= last_rssi;
944 }
945 rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
946 rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
947 index++] = pstats->signalstrength;
948 if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
949 rtlpriv->stats.ui_rssi.index = 0;
950 tmpval = rtlpriv->stats.ui_rssi.total_val /
951 rtlpriv->stats.ui_rssi.total_num;
952 rtlpriv->stats.signal_strength =
953 _rtl92c_translate_todbm(hw, (u8) tmpval);
954 pstats->rssi = rtlpriv->stats.signal_strength;
955 }
956 if (!pstats->is_cck && pstats->packet_toself) {
957 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
958 rfpath++) {
959 if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
960 continue;
961 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
962 rtlpriv->stats.rx_rssi_percentage[rfpath] =
963 pstats->rx_mimo_signalstrength[rfpath];
964 }
965 if (pstats->rx_mimo_signalstrength[rfpath] >
966 rtlpriv->stats.rx_rssi_percentage[rfpath]) {
967 rtlpriv->stats.rx_rssi_percentage[rfpath] =
968 ((rtlpriv->stats.
969 rx_rssi_percentage[rfpath] *
970 (RX_SMOOTH_FACTOR - 1)) +
971 (pstats->rx_mimo_signalstrength[rfpath])) /
972 (RX_SMOOTH_FACTOR);
973
974 rtlpriv->stats.rx_rssi_percentage[rfpath] =
975 rtlpriv->stats.rx_rssi_percentage[rfpath] +
976 1;
977 } else {
978 rtlpriv->stats.rx_rssi_percentage[rfpath] =
979 ((rtlpriv->stats.
980 rx_rssi_percentage[rfpath] *
981 (RX_SMOOTH_FACTOR - 1)) +
982 (pstats->rx_mimo_signalstrength[rfpath])) /
983 (RX_SMOOTH_FACTOR);
984 }
985 }
986 }
987}
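Two estimators are maintained above: a sliding-window mean over the last PHY_RSSI_SLID_WIN_MAX samples feeds the UI signal strength, while each RF path keeps an exponentially smoothed percentage, new = (old * (RX_SMOOTH_FACTOR - 1) + sample) / RX_SMOOTH_FACTOR, bumped by one when the sample is above the running value so rises are tracked a little faster. A minimal sketch of the per-path smoother; the factor of 20 is an assumed stand-in for RX_SMOOTH_FACTOR, whose real value lives in the driver headers:

#include <stdio.h>

#define SMOOTH_FACTOR 20	/* assumed value of RX_SMOOTH_FACTOR */

/* One step of the per-path smoother used above: an (N-1)/N IIR filter that
 * is nudged upward by one when the new sample exceeds the running value. */
static unsigned int smooth_rssi(unsigned int old, unsigned int sample)
{
	unsigned int next = (old * (SMOOTH_FACTOR - 1) + sample) / SMOOTH_FACTOR;

	if (sample > old)
		next += 1;
	return next;
}

int main(void)
{
	unsigned int rssi = 40, i;

	for (i = 0; i < 5; i++) {
		rssi = smooth_rssi(rssi, 60);	/* signal jumps to 60 % */
		printf("step %u: %u\n", i, rssi);
	}
	return 0;
}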
988
989static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
990 struct rtl_stats *pstats)
991{
992 struct rtl_priv *rtlpriv = rtl_priv(hw);
993 int weighting = 0;
994
995 if (rtlpriv->stats.recv_signal_power == 0)
996 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
997 if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
998 weighting = 5;
999 else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
1000 weighting = (-5);
1001 rtlpriv->stats.recv_signal_power =
1002 (rtlpriv->stats.recv_signal_power * 5 +
1003 pstats->recvsignalpower + weighting) / 6;
1004}
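The recv_signal_power estimator above is a 5:1 weighted moving average with a small bias toward the direction the signal is moving. For example, updating a running value of -60 dBm with a -50 dBm sample gives (-60 * 5 + -50 + 5) / 6 = -57 with the driver's integer division. A standalone sketch of that arithmetic:

#include <stdio.h>

/* One update of the 5:1 weighted average used above; the +/-5 bias makes the
 * estimate react a little faster in the direction the signal is moving. */
static int update_recv_power(int old, int sample)
{
	int weighting = 0;

	if (old == 0)
		return sample;
	if (sample > old)
		weighting = 5;
	else if (sample < old)
		weighting = -5;
	return (old * 5 + sample + weighting) / 6;
}

int main(void)
{
	printf("%d\n", update_recv_power(-60, -50));	/* -> -57 */
	printf("%d\n", update_recv_power(-50, -70));	/* -> -54 */
	return 0;
}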
1005
1006static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
1007 struct rtl_stats *pstats)
1008{
1009 struct rtl_priv *rtlpriv = rtl_priv(hw);
1010 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1011 long undecorated_smoothed_pwdb = 0;
1012
1013 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1014 return;
1015 } else {
1016 undecorated_smoothed_pwdb =
1017 rtlpriv->dm.undecorated_smoothed_pwdb;
1018 }
1019 if (pstats->packet_toself || pstats->packet_beacon) {
1020 if (undecorated_smoothed_pwdb < 0)
1021 undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
1022 if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
1023 undecorated_smoothed_pwdb =
1024 (((undecorated_smoothed_pwdb) *
1025 (RX_SMOOTH_FACTOR - 1)) +
1026 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
1027 undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
1028 + 1;
1029 } else {
1030 undecorated_smoothed_pwdb =
1031 (((undecorated_smoothed_pwdb) *
1032 (RX_SMOOTH_FACTOR - 1)) +
1033 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
1034 }
1035 rtlpriv->dm.undecorated_smoothed_pwdb =
1036 undecorated_smoothed_pwdb;
1037 _rtl92c_update_rxsignalstatistics(hw, pstats);
1038 }
1039}
1040
1041static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
1042 struct rtl_stats *pstats)
1043{
1044 struct rtl_priv *rtlpriv = rtl_priv(hw);
1045 u32 last_evm = 0, n_stream, tmpval;
1046
1047 if (pstats->signalquality != 0) {
1048 if (pstats->packet_toself || pstats->packet_beacon) {
1049 if (rtlpriv->stats.LINK_Q.total_num++ >=
1050 PHY_LINKQUALITY_SLID_WIN_MAX) {
1051 rtlpriv->stats.LINK_Q.total_num =
1052 PHY_LINKQUALITY_SLID_WIN_MAX;
1053 last_evm =
1054 rtlpriv->stats.LINK_Q.elements
1055 [rtlpriv->stats.LINK_Q.index];
1056 rtlpriv->stats.LINK_Q.total_val -=
1057 last_evm;
1058 }
1059 rtlpriv->stats.LINK_Q.total_val +=
1060 pstats->signalquality;
1061 rtlpriv->stats.LINK_Q.elements
1062 [rtlpriv->stats.LINK_Q.index++] =
1063 pstats->signalquality;
1064 if (rtlpriv->stats.LINK_Q.index >=
1065 PHY_LINKQUALITY_SLID_WIN_MAX)
1066 rtlpriv->stats.LINK_Q.index = 0;
1067 tmpval = rtlpriv->stats.LINK_Q.total_val /
1068 rtlpriv->stats.LINK_Q.total_num;
1069 rtlpriv->stats.signal_quality = tmpval;
1070 rtlpriv->stats.last_sigstrength_inpercent = tmpval;
1071 for (n_stream = 0; n_stream < 2;
1072 n_stream++) {
1073 if (pstats->RX_SIGQ[n_stream] != -1) {
1074 if (!rtlpriv->stats.RX_EVM[n_stream]) {
1075 rtlpriv->stats.RX_EVM[n_stream]
1076 = pstats->RX_SIGQ[n_stream];
1077 }
1078 rtlpriv->stats.RX_EVM[n_stream] =
1079 ((rtlpriv->stats.RX_EVM
1080 [n_stream] *
1081 (RX_SMOOTH_FACTOR - 1)) +
1082 (pstats->RX_SIGQ
1083 [n_stream] * 1)) /
1084 (RX_SMOOTH_FACTOR);
1085 }
1086 }
1087 }
1088 }
1091}
1092
1093static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
1094 u8 *buffer,
1095 struct rtl_stats *pcurrent_stats)
1096{
1097 if (!pcurrent_stats->packet_matchbssid &&
1098 !pcurrent_stats->packet_beacon)
1099 return;
1100 _rtl92c_process_ui_rssi(hw, pcurrent_stats);
1101 _rtl92c_process_pwdb(hw, pcurrent_stats);
1102 _rtl92c_process_LINK_Q(hw, pcurrent_stats);
1103}
1104
1105void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
1106 struct sk_buff *skb,
1107 struct rtl_stats *pstats,
1108 struct rx_desc_92c *pdesc,
1109 struct rx_fwinfo_92c *p_drvinfo)
1110{
1111 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1112 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1113 struct ieee80211_hdr *hdr;
1114 u8 *tmp_buf;
1115 u8 *praddr;
1116 u8 *psaddr;
1117 __le16 fc;
1118 u16 type, cpu_fc;
1119 bool packet_matchbssid, packet_toself, packet_beacon = false;
1120
1121 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
1122 hdr = (struct ieee80211_hdr *)tmp_buf;
1123 fc = hdr->frame_control;
1124 cpu_fc = le16_to_cpu(fc);
1125 type = WLAN_FC_GET_TYPE(fc);
1126 praddr = hdr->addr1;
1127 psaddr = hdr->addr2;
1128 packet_matchbssid =
1129 ((IEEE80211_FTYPE_CTL != type) &&
1130 (!compare_ether_addr(mac->bssid,
1131 (cpu_fc & IEEE80211_FCTL_TODS) ?
1132 hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
1133 hdr->addr2 : hdr->addr3)) &&
1134 (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
1135
1136 packet_toself = packet_matchbssid &&
1137 (!compare_ether_addr(praddr, rtlefuse->dev_addr));
1138 if (ieee80211_is_beacon(fc))
1139 packet_beacon = true;
1140 _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
1141 packet_matchbssid, packet_toself,
1142 packet_beacon);
1143 _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
1144}
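The BSSID comparison above follows the standard 802.11 addressing rule: when ToDS is set the BSSID is in addr1, when FromDS is set it is in addr2, and otherwise (IBSS and most management frames) it is in addr3. A condensed sketch of that selector, with a plain struct standing in for struct ieee80211_hdr and the frame-control bit values written out explicitly:

#include <string.h>

#define FCTL_TODS   0x0100	/* IEEE80211_FCTL_TODS */
#define FCTL_FROMDS 0x0200	/* IEEE80211_FCTL_FROMDS */

struct hdr3 {			/* stand-in for struct ieee80211_hdr */
	unsigned short fc;
	unsigned char addr1[6], addr2[6], addr3[6];
};

/* Return the address field that carries the BSSID for this frame. */
static const unsigned char *bssid_of(const struct hdr3 *h)
{
	if (h->fc & FCTL_TODS)
		return h->addr1;	/* station -> AP */
	if (h->fc & FCTL_FROMDS)
		return h->addr2;	/* AP -> station */
	return h->addr3;		/* IBSS / management */
}

static int matches_bssid(const struct hdr3 *h, const unsigned char *bssid)
{
	return memcmp(bssid_of(h), bssid, 6) == 0;
}

int main(void)
{
	struct hdr3 h = { .fc = FCTL_FROMDS };
	unsigned char bssid[6] = { 0 };

	memcpy(h.addr2, bssid, 6);
	return !matches_bssid(&h, bssid);	/* exits 0: addr2 matched */
}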
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
new file mode 100644
index 00000000000..298fdb724aa
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -0,0 +1,180 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_MAC_H__
31#define __RTL92C_MAC_H__
32
33#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255
34#define DRIVER_EARLY_INT_TIME 0x05
35#define BCN_DMA_ATIME_INT_TIME 0x02
36
37void rtl92c_read_chip_version(struct ieee80211_hw *hw);
38bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
39bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary);
40void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
41 u8 *p_macaddr, bool is_group, u8 enc_algo,
42 bool is_wepkey, bool clear_all);
43void rtl92c_enable_interrupt(struct ieee80211_hw *hw);
44void rtl92c_disable_interrupt(struct ieee80211_hw *hw);
45void rtl92c_set_qos(struct ieee80211_hw *hw, int aci);
46
47
48/*---------------------------------------------------------------
49 * Hardware init functions
50 *---------------------------------------------------------------*/
51void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
52void rtl92c_init_interrupt(struct ieee80211_hw *hw);
53void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
54
55int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
56void rtl92c_init_network_type(struct ieee80211_hw *hw);
57void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw);
58void rtl92c_init_rate_fallback(struct ieee80211_hw *hw);
59
60void rtl92c_init_edca_param(struct ieee80211_hw *hw,
61 u16 queue,
62 u16 txop,
63 u8 cw_min,
64 u8 cw_max,
65 u8 aifs);
66
67void rtl92c_init_edca(struct ieee80211_hw *hw);
68void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
69void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
70void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
71void rtl92c_init_retry_function(struct ieee80211_hw *hw);
72
73void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
74 enum version_8192c version);
75
76void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
77void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
78
79/* For filter */
80u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
81void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
82u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
83void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
84u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
85void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
86
87
88u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
89
90#define RX_HAL_IS_CCK_RATE(_pdesc)\
91 (GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\
92 GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\
93 GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\
94 GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M)
95
96struct rx_fwinfo_92c {
97 u8 gain_trsw[4];
98 u8 pwdb_all;
99 u8 cfosho[4];
100 u8 cfotail[4];
101 char rxevm[2];
102 char rxsnr[4];
103 u8 pdsnr[2];
104 u8 csi_current[2];
105 u8 csi_target[2];
106 u8 sigevm;
107 u8 max_ex_pwr;
108 u8 ex_intf_flag:1;
109 u8 sgi_en:1;
110 u8 rxsc:2;
111 u8 reserve:4;
112} __packed;
113
114struct rx_desc_92c {
115 u32 length:14;
116 u32 crc32:1;
117 u32 icverror:1;
118 u32 drv_infosize:4;
119 u32 security:3;
120 u32 qos:1;
121 u32 shift:2;
122 u32 phystatus:1;
123 u32 swdec:1;
124 u32 lastseg:1;
125 u32 firstseg:1;
126 u32 eor:1;
127 u32 own:1;
128 u32 macid:5; /* word 1 */
129 u32 tid:4;
130 u32 hwrsvd:5;
131 u32 paggr:1;
132 u32 faggr:1;
133 u32 a1_fit:4;
134 u32 a2_fit:4;
135 u32 pam:1;
136 u32 pwr:1;
137 u32 moredata:1;
138 u32 morefrag:1;
139 u32 type:2;
140 u32 mc:1;
141 u32 bc:1;
142 u32 seq:12; /* word 2 */
143 u32 frag:4;
144 u32 nextpktlen:14;
145 u32 nextind:1;
146 u32 rsvd:1;
147 u32 rxmcs:6; /* word 3 */
148 u32 rxht:1;
149 u32 amsdu:1;
150 u32 splcp:1;
151 u32 bandwidth:1;
152 u32 htc:1;
153 u32 tcpchk_rpt:1;
154 u32 ipcchk_rpt:1;
155 u32 tcpchk_valid:1;
156 u32 hwpcerr:1;
157 u32 hwpcind:1;
158 u32 iv0:16;
159 u32 iv1; /* word 4 */
160 u32 tsfl; /* word 5 */
161 u32 bufferaddress; /* word 6 */
162 u32 bufferaddress64; /* word 7 */
163} __packed;
164
165enum rtl_desc_qsel rtl92c_map_hwqueue_to_fwqueue(u16 fc,
166 unsigned int skb_queue);
168void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
169 struct sk_buff *skb,
170 struct rtl_stats *pstats,
171 struct rx_desc_92c *pdesc,
172 struct rx_fwinfo_92c *p_drvinfo);
173
174/*---------------------------------------------------------------
175 * Card disable functions
176 *---------------------------------------------------------------*/
177
178
179
180#endif
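A quick consistency check on struct rx_desc_92c above: each of the first four words packs to exactly 32 bits of bitfields (14+1+1+4+3+1+2+1+1+1+1+1+1 for word 0, and so on), and with iv1, tsfl and the two buffer addresses the packed descriptor comes to eight dwords. The sketch below just re-adds those widths; it is bookkeeping, not driver code:

#include <stdio.h>

#define N(a) (int)(sizeof(a) / sizeof((a)[0]))

/* Bit widths of the fields in each descriptor word, copied from the
 * struct rx_desc_92c definition above. */
static const int word0[] = { 14, 1, 1, 4, 3, 1, 2, 1, 1, 1, 1, 1, 1 };
static const int word1[] = { 5, 4, 5, 1, 1, 4, 4, 1, 1, 1, 1, 2, 1, 1 };
static const int word2[] = { 12, 4, 14, 1, 1 };
static const int word3[] = { 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 16 };

static int bits(const int *w, int n)
{
	int i, sum = 0;

	for (i = 0; i < n; i++)
		sum += w[i];
	return sum;
}

int main(void)
{
	printf("word0=%d word1=%d word2=%d word3=%d bits\n",
	       bits(word0, N(word0)), bits(word1, N(word1)),
	       bits(word2, N(word2)), bits(word3, N(word3)));
	/* 4 bitfield words + iv1, tsfl, bufferaddress, bufferaddress64
	 * = 8 dwords = 32 bytes for the packed descriptor. */
	return 0;
}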
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
new file mode 100644
index 00000000000..dc65ef2bbea
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -0,0 +1,611 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../ps.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "rf.h"
37#include "dm.h"
38#include "table.h"
39
40#include "../rtl8192c/phy_common.c"
41
42u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
43 enum radio_path rfpath, u32 regaddr, u32 bitmask)
44{
45 struct rtl_priv *rtlpriv = rtl_priv(hw);
46 u32 original_value, readback_value, bitshift;
47 struct rtl_phy *rtlphy = &(rtlpriv->phy);
48
49 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
50 "rfpath(%#x), bitmask(%#x)\n",
51 regaddr, rfpath, bitmask));
52 if (rtlphy->rf_mode != RF_OP_BY_FW) {
53 original_value = _rtl92c_phy_rf_serial_read(hw,
54 rfpath, regaddr);
55 } else {
56 original_value = _rtl92c_phy_fw_rf_serial_read(hw,
57 rfpath, regaddr);
58 }
59 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
60 readback_value = (original_value & bitmask) >> bitshift;
61 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
62 ("regaddr(%#x), rfpath(%#x), "
63 "bitmask(%#x), original_value(%#x)\n",
64 regaddr, rfpath, bitmask, original_value));
65 return readback_value;
66}
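The bitmask/bitshift idiom used here, and again in rtl92c_phy_set_rf_reg() below, treats the mask's lowest set bit as the field position: a read returns (reg & mask) >> shift and a masked write stores (reg & ~mask) | (value << shift). A worked example with an arbitrary register value and an 8-bit field mask (both made up for illustration):

#include <stdio.h>

/* Position of the lowest set bit, i.e. what _rtl92c_phy_calculate_bit_shift()
 * computes for a contiguous bitmask. */
static unsigned int bit_shift(unsigned int mask)
{
	unsigned int shift = 0;

	while (shift < 32 && !(mask & (1u << shift)))
		shift++;
	return shift;
}

int main(void)
{
	unsigned int reg = 0x12345678, mask = 0x0000FF00;	/* example field */
	unsigned int shift = bit_shift(mask);
	unsigned int field = (reg & mask) >> shift;		/* read  -> 0x56 */
	unsigned int newreg = (reg & ~mask) | (0xAB << shift);	/* write 0xAB   */

	printf("shift=%u field=0x%x newreg=0x%08x\n", shift, field, newreg);
	return 0;
}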
67
68void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
69 enum radio_path rfpath,
70 u32 regaddr, u32 bitmask, u32 data)
71{
72 struct rtl_priv *rtlpriv = rtl_priv(hw);
73 struct rtl_phy *rtlphy = &(rtlpriv->phy);
74 u32 original_value, bitshift;
75
76 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
77 ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
78 regaddr, bitmask, data, rfpath));
79 if (rtlphy->rf_mode != RF_OP_BY_FW) {
80 if (bitmask != RFREG_OFFSET_MASK) {
81 original_value = _rtl92c_phy_rf_serial_read(hw,
82 rfpath,
83 regaddr);
84 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
85 data =
86 ((original_value & (~bitmask)) |
87 (data << bitshift));
88 }
89 _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
90 } else {
91 if (bitmask != RFREG_OFFSET_MASK) {
92 original_value = _rtl92c_phy_fw_rf_serial_read(hw,
93 rfpath,
94 regaddr);
95 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
96 data =
97 ((original_value & (~bitmask)) |
98 (data << bitshift));
99 }
100 _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
101 }
102 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
103 "bitmask(%#x), data(%#x), rfpath(%#x)\n",
104 regaddr, bitmask, data, rfpath));
105}
106
107bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
108{
109 bool rtstatus;
110 struct rtl_priv *rtlpriv = rtl_priv(hw);
111 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
112 bool is92c = IS_92C_SERIAL(rtlhal->version);
113
114 rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw);
115 if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal))
116 rtl_write_byte(rtlpriv, 0x14, 0x71);
117 return rtstatus;
118}
119
120bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
121{
122 bool rtstatus = true;
123 struct rtl_priv *rtlpriv = rtl_priv(hw);
124 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
125 u16 regval;
126 u8 b_reg_hwparafile = 1;
127
128 _rtl92c_phy_init_bb_rf_register_definition(hw);
129 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
130 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) |
131 BIT(0) | BIT(1));
132 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
133 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
134 rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
135 if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
136 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
137 FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB);
138 } else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) {
139 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD |
140 FEN_BB_GLB_RSTn | FEN_BBRSTB);
141 rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
142 }
143 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
144 if (b_reg_hwparafile == 1)
145 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
146 return rtstatus;
147}
148
149static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
150{
151 struct rtl_priv *rtlpriv = rtl_priv(hw);
152 struct rtl_phy *rtlphy = &(rtlpriv->phy);
153 u32 i;
154 u32 arraylength;
155 u32 *ptrarray;
156
157 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
158 arraylength = rtlphy->hwparam_tables[MAC_REG].length;
159 ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
160 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
161 ("Img:RTL8192CEMAC_2T_ARRAY\n"));
162 for (i = 0; i < arraylength; i = i + 2)
163 rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
164 return true;
165}
166
167static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
168 u8 configtype)
169{
170 int i;
171 u32 *phy_regarray_table;
172 u32 *agctab_array_table;
173 u16 phy_reg_arraylen, agctab_arraylen;
174 struct rtl_priv *rtlpriv = rtl_priv(hw);
175 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
176 struct rtl_phy *rtlphy = &(rtlpriv->phy);
177
178 if (IS_92C_SERIAL(rtlhal->version)) {
179 agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_2T].length;
180 agctab_array_table = rtlphy->hwparam_tables[AGCTAB_2T].pdata;
181 phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_2T].length;
182 phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_2T].pdata;
183 } else {
184 agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_1T].length;
185 agctab_array_table = rtlphy->hwparam_tables[AGCTAB_1T].pdata;
186 phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_1T].length;
187 phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_1T].pdata;
188 }
189 if (configtype == BASEBAND_CONFIG_PHY_REG) {
190 for (i = 0; i < phy_reg_arraylen; i = i + 2) {
191 if (phy_regarray_table[i] == 0xfe)
192 mdelay(50);
193 else if (phy_regarray_table[i] == 0xfd)
194 mdelay(5);
195 else if (phy_regarray_table[i] == 0xfc)
196 mdelay(1);
197 else if (phy_regarray_table[i] == 0xfb)
198 udelay(50);
199 else if (phy_regarray_table[i] == 0xfa)
200 udelay(5);
201 else if (phy_regarray_table[i] == 0xf9)
202 udelay(1);
203 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
204 phy_regarray_table[i + 1]);
205 udelay(1);
206 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
207 ("The phy_regarray_table[0] is %x"
208 " Rtl819XPHY_REGArray[1] is %x\n",
209 phy_regarray_table[i],
210 phy_regarray_table[i + 1]));
211 }
212 rtl92c_phy_config_bb_external_pa(hw);
213 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
214 for (i = 0; i < agctab_arraylen; i = i + 2) {
215 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
216 agctab_array_table[i + 1]);
217 udelay(1);
218 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
219 ("The agctab_array_table[0] is "
220 "%x Rtl819XPHY_REGArray[1] is %x\n",
221 agctab_array_table[i],
222 agctab_array_table[i + 1]));
223 }
224 }
225 return true;
226}
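The tables walked above are flat arrays of (address, value) pairs in which a few reserved address codes, 0xfe down to 0xf9, request inline delays instead of register writes; the RF tables handled later by rtl92c_phy_config_rf_with_headerfile() use the same convention and skip the write for a marker entry. A minimal interpreter for that format (the sample table is invented for illustration):

#include <stdio.h>

/* Delay markers understood by the 92C config tables: 0xfe..0xfc request
 * millisecond delays, 0xfb..0xf9 microsecond delays. */
static void table_delay(unsigned int marker)
{
	switch (marker) {
	case 0xfe: printf("mdelay(50)\n"); break;
	case 0xfd: printf("mdelay(5)\n");  break;
	case 0xfc: printf("mdelay(1)\n");  break;
	case 0xfb: printf("udelay(50)\n"); break;
	case 0xfa: printf("udelay(5)\n");  break;
	case 0xf9: printf("udelay(1)\n");  break;
	}
}

static void run_table(const unsigned int *tbl, unsigned int len)
{
	unsigned int i;

	for (i = 0; i + 1 < len; i += 2) {
		if (tbl[i] >= 0xf9 && tbl[i] <= 0xfe)
			table_delay(tbl[i]);
		else
			printf("write reg 0x%03x = 0x%08x\n", tbl[i], tbl[i + 1]);
	}
}

int main(void)
{
	/* Made-up sample in the same (addr, value) pair format. */
	static const unsigned int sample[] = {
		0x800, 0x80040000, 0xfe, 0x0, 0x804, 0x00000003,
	};

	run_table(sample, sizeof(sample) / sizeof(sample[0]));
	return 0;
}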
227
228static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
229 u8 configtype)
230{
231 struct rtl_priv *rtlpriv = rtl_priv(hw);
232 struct rtl_phy *rtlphy = &(rtlpriv->phy);
233 int i;
234 u32 *phy_regarray_table_pg;
235 u16 phy_regarray_pg_len;
236
237 rtlphy->pwrgroup_cnt = 0;
238 phy_regarray_pg_len = rtlphy->hwparam_tables[PHY_REG_PG].length;
239 phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
240 if (configtype == BASEBAND_CONFIG_PHY_REG) {
241 for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
242 if (phy_regarray_table_pg[i] == 0xfe)
243 mdelay(50);
244 else if (phy_regarray_table_pg[i] == 0xfd)
245 mdelay(5);
246 else if (phy_regarray_table_pg[i] == 0xfc)
247 mdelay(1);
248 else if (phy_regarray_table_pg[i] == 0xfb)
249 udelay(50);
250 else if (phy_regarray_table_pg[i] == 0xfa)
251 udelay(5);
252 else if (phy_regarray_table_pg[i] == 0xf9)
253 udelay(1);
254 _rtl92c_store_pwrIndex_diffrate_offset(hw,
255 phy_regarray_table_pg[i],
256 phy_regarray_table_pg[i + 1],
257 phy_regarray_table_pg[i + 2]);
258 }
259 } else {
260 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
261 ("configtype != BaseBand_Config_PHY_REG\n"));
262 }
263 return true;
264}
265
266bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
267 enum radio_path rfpath)
268{
269 int i;
270 u32 *radioa_array_table;
271 u32 *radiob_array_table;
272 u16 radioa_arraylen, radiob_arraylen;
273 struct rtl_priv *rtlpriv = rtl_priv(hw);
274 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
275 struct rtl_phy *rtlphy = &(rtlpriv->phy);
276
277 if (IS_92C_SERIAL(rtlhal->version)) {
278 radioa_arraylen = rtlphy->hwparam_tables[RADIOA_2T].length;
279 radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
280 radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
281 radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
282 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
283 ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
284 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
285 ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
286 } else {
287 radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
288 radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
289 radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
290 radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
291 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
292 ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
293 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
294 ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
295 }
296 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
297 switch (rfpath) {
298 case RF90_PATH_A:
299 for (i = 0; i < radioa_arraylen; i = i + 2) {
300 if (radioa_array_table[i] == 0xfe)
301 mdelay(50);
302 else if (radioa_array_table[i] == 0xfd)
303 mdelay(5);
304 else if (radioa_array_table[i] == 0xfc)
305 mdelay(1);
306 else if (radioa_array_table[i] == 0xfb)
307 udelay(50);
308 else if (radioa_array_table[i] == 0xfa)
309 udelay(5);
310 else if (radioa_array_table[i] == 0xf9)
311 udelay(1);
312 else {
313 rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
314 RFREG_OFFSET_MASK,
315 radioa_array_table[i + 1]);
316 udelay(1);
317 }
318 }
319 _rtl92c_phy_config_rf_external_pa(hw, rfpath);
320 break;
321 case RF90_PATH_B:
322 for (i = 0; i < radiob_arraylen; i = i + 2) {
323 if (radiob_array_table[i] == 0xfe) {
324 mdelay(50);
325 } else if (radiob_array_table[i] == 0xfd)
326 mdelay(5);
327 else if (radiob_array_table[i] == 0xfc)
328 mdelay(1);
329 else if (radiob_array_table[i] == 0xfb)
330 udelay(50);
331 else if (radiob_array_table[i] == 0xfa)
332 udelay(5);
333 else if (radiob_array_table[i] == 0xf9)
334 udelay(1);
335 else {
336 rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
337 RFREG_OFFSET_MASK,
338 radiob_array_table[i + 1]);
339 udelay(1);
340 }
341 }
342 break;
343 case RF90_PATH_C:
344 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
345 ("switch case not process\n"));
346 break;
347 case RF90_PATH_D:
348 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
349 ("switch case not process\n"));
350 break;
351 }
352 return true;
353}
354
355void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
356{
357 struct rtl_priv *rtlpriv = rtl_priv(hw);
358 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
359 struct rtl_phy *rtlphy = &(rtlpriv->phy);
360 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
361 u8 reg_bw_opmode;
362 u8 reg_prsr_rsc;
363
364 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
365 ("Switch to %s bandwidth\n",
366 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
367 "20MHz" : "40MHz"))
368 if (is_hal_stop(rtlhal)) {
369 rtlphy->set_bwmode_inprogress = false;
370 return;
371 }
372 reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
373 reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
374 switch (rtlphy->current_chan_bw) {
375 case HT_CHANNEL_WIDTH_20:
376 reg_bw_opmode |= BW_OPMODE_20MHZ;
377 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
378 break;
379 case HT_CHANNEL_WIDTH_20_40:
380 reg_bw_opmode &= ~BW_OPMODE_20MHZ;
381 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
382 reg_prsr_rsc =
383 (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
384 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
385 break;
386 default:
387 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
388 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
389 break;
390 }
391 switch (rtlphy->current_chan_bw) {
392 case HT_CHANNEL_WIDTH_20:
393 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
394 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
395 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
396 break;
397 case HT_CHANNEL_WIDTH_20_40:
398 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
399 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
400 rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
401 (mac->cur_40_prime_sc >> 1));
402 rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
403 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
404 rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
405 (mac->cur_40_prime_sc ==
406 HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
407 break;
408 default:
409 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
410 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
411 break;
412 }
413 rtl92c_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
414 rtlphy->set_bwmode_inprogress = false;
415 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
416}
417
418void rtl92c_bb_block_on(struct ieee80211_hw *hw)
419{
420 struct rtl_priv *rtlpriv = rtl_priv(hw);
421
422 mutex_lock(&rtlpriv->io.bb_mutex);
423 rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
424 rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
425 mutex_unlock(&rtlpriv->io.bb_mutex);
426}
427
428static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
429{
430 u8 tmpreg;
431 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
432 struct rtl_priv *rtlpriv = rtl_priv(hw);
433
434 tmpreg = rtl_read_byte(rtlpriv, 0xd03);
435
436 if ((tmpreg & 0x70) != 0)
437 rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
438 else
439 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
440
441 if ((tmpreg & 0x70) != 0) {
442 rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
443 if (is2t)
444 rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
445 MASK12BITS);
446 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
447 (rf_a_mode & 0x8FFFF) | 0x10000);
448 if (is2t)
449 rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
450 (rf_b_mode & 0x8FFFF) | 0x10000);
451 }
452 lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
453 rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
454 mdelay(100);
455 if ((tmpreg & 0x70) != 0) {
456 rtl_write_byte(rtlpriv, 0xd03, tmpreg);
457 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
458 if (is2t)
459 rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
460 rf_b_mode);
461 } else {
462 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
463 }
464}
465
466static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
467 enum rf_pwrstate rfpwr_state)
468{
469 struct rtl_priv *rtlpriv = rtl_priv(hw);
470 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
471 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
472 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
473 bool bresult = true;
474 u8 i, queue_id;
475 struct rtl8192_tx_ring *ring = NULL;
476
477 ppsc->set_rfpowerstate_inprogress = true;
478 switch (rfpwr_state) {
479 case ERFON:
480 if ((ppsc->rfpwr_state == ERFOFF) &&
481 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
482 bool rtstatus;
483 u32 InitializeCount = 0;
484
485 do {
486 InitializeCount++;
487 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
488 ("IPS Set eRf nic enable\n"));
489 rtstatus = rtl_ps_enable_nic(hw);
490 } while ((rtstatus != true)
491 && (InitializeCount < 10));
492 RT_CLEAR_PS_LEVEL(ppsc,
493 RT_RF_OFF_LEVL_HALT_NIC);
494 } else {
495 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
496 ("Set ERFON sleeped:%d ms\n",
497 jiffies_to_msecs(jiffies -
498 ppsc->
499 last_sleep_jiffies)));
500 ppsc->last_awake_jiffies = jiffies;
501 rtl92ce_phy_set_rf_on(hw);
502 }
503 if (mac->link_state == MAC80211_LINKED) {
504 rtlpriv->cfg->ops->led_control(hw,
505 LED_CTL_LINK);
506 } else {
507 rtlpriv->cfg->ops->led_control(hw,
508 LED_CTL_NO_LINK);
509 }
510 break;
511 case ERFOFF:
512 for (queue_id = 0, i = 0;
513 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
514 ring = &pcipriv->dev.tx_ring[queue_id];
515 if (skb_queue_len(&ring->queue) == 0 ||
516 queue_id == BEACON_QUEUE) {
517 queue_id++;
518 continue;
519 } else {
520 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
521 ("eRf Off/Sleep: %d times "
522 "TcbBusyQueue[%d] "
523 "=%d before doze!\n", (i + 1),
524 queue_id,
525 skb_queue_len(&ring->queue)));
526 udelay(10);
527 i++;
528 }
529 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
530 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
531 ("\nERFOFF: %d times "
532 "TcbBusyQueue[%d] = %d !\n",
533 MAX_DOZE_WAITING_TIMES_9x,
534 queue_id,
535 skb_queue_len(&ring->queue)));
536 break;
537 }
538 }
539 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
540 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
541 ("IPS Set eRf nic disable\n"));
542 rtl_ps_disable_nic(hw);
543 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
544 } else {
545 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
546 rtlpriv->cfg->ops->led_control(hw,
547 LED_CTL_NO_LINK);
548 } else {
549 rtlpriv->cfg->ops->led_control(hw,
550 LED_CTL_POWER_OFF);
551 }
552 }
553 break;
554 case ERFSLEEP:
555 if (ppsc->rfpwr_state == ERFOFF)
556 break;
557 for (queue_id = 0, i = 0;
558 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
559 ring = &pcipriv->dev.tx_ring[queue_id];
560 if (skb_queue_len(&ring->queue) == 0) {
561 queue_id++;
562 continue;
563 } else {
564 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
565 ("eRf Off/Sleep: %d times "
566 "TcbBusyQueue[%d] =%d before "
567 "doze!\n", (i + 1), queue_id,
568 skb_queue_len(&ring->queue)));
569 udelay(10);
570 i++;
571 }
572 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
573 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
574 ("\n ERFSLEEP: %d times "
575 "TcbBusyQueue[%d] = %d !\n",
576 MAX_DOZE_WAITING_TIMES_9x,
577 queue_id,
578 skb_queue_len(&ring->queue)));
579 break;
580 }
581 }
582 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
583 ("Set ERFSLEEP awaked:%d ms\n",
584 jiffies_to_msecs(jiffies -
585 ppsc->last_awake_jiffies)));
586 ppsc->last_sleep_jiffies = jiffies;
587 _rtl92ce_phy_set_rf_sleep(hw);
588 break;
589 default:
590 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
591 ("switch case not process\n"));
592 bresult = false;
593 break;
594 }
595 if (bresult)
596 ppsc->rfpwr_state = rfpwr_state;
597 ppsc->set_rfpowerstate_inprogress = false;
598 return bresult;
599}
600
601bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
602 enum rf_pwrstate rfpwr_state)
603{
604 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
605 bool bresult = false;
606
607 if (rfpwr_state == ppsc->rfpwr_state)
608 return bresult;
609 bresult = _rtl92ce_phy_set_rf_power_state(hw, rfpwr_state);
610 return bresult;
611}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
new file mode 100644
index 00000000000..c456c15afbf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
@@ -0,0 +1,34 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/phy.h"
31
32void rtl92c_bb_block_on(struct ieee80211_hw *hw);
33bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath);
34void rtl92c_phy_set_io(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
new file mode 100644
index 00000000000..7f1be614c99
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
@@ -0,0 +1,30 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
new file mode 100644
index 00000000000..9149adcc8fa
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -0,0 +1,493 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "reg.h"
32#include "def.h"
33#include "phy.h"
34#include "rf.h"
35#include "dm.h"
36
37static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
38
39void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 struct rtl_phy *rtlphy = &(rtlpriv->phy);
43
44 switch (bandwidth) {
45 case HT_CHANNEL_WIDTH_20:
46 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
47 0xfffff3ff) | 0x0400);
48 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
49 rtlphy->rfreg_chnlval[0]);
50 break;
51 case HT_CHANNEL_WIDTH_20_40:
52 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
53 0xfffff3ff));
54 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
55 rtlphy->rfreg_chnlval[0]);
56 break;
57 default:
58 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
59 ("unknown bandwidth: %#X\n", bandwidth));
60 break;
61 }
62}
63
64void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel)
66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw);
68 struct rtl_phy *rtlphy = &(rtlpriv->phy);
69 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
70 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
71 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
72 u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
73 bool turbo_scanoff = false;
74 u8 idx1, idx2;
75 u8 *ptr;
76
77 if (rtlhal->interface == INTF_PCI) {
78 if (rtlefuse->eeprom_regulatory != 0)
79 turbo_scanoff = true;
80 } else {
81 if ((rtlefuse->eeprom_regulatory != 0) ||
82 (rtlefuse->external_pa))
83 turbo_scanoff = true;
84 }
85 if (mac->act_scanning == true) {
86 tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
87 tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
88 if (turbo_scanoff) {
89 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
90 tx_agc[idx1] = ppowerlevel[idx1] |
91 (ppowerlevel[idx1] << 8) |
92 (ppowerlevel[idx1] << 16) |
93 (ppowerlevel[idx1] << 24);
94 if (rtlhal->interface == INTF_USB) {
95 if (tx_agc[idx1] > 0x20 &&
96 rtlefuse->external_pa)
97 tx_agc[idx1] = 0x20;
98 }
99 }
100 }
101 } else {
102 if (rtlpriv->dm.dynamic_txhighpower_lvl ==
103 TXHIGHPWRLEVEL_LEVEL1) {
104 tx_agc[RF90_PATH_A] = 0x10101010;
105 tx_agc[RF90_PATH_B] = 0x10101010;
106 } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
107			   TXHIGHPWRLEVEL_LEVEL2) {
108 tx_agc[RF90_PATH_A] = 0x00000000;
109 tx_agc[RF90_PATH_B] = 0x00000000;
110		} else {
111 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
112 tx_agc[idx1] = ppowerlevel[idx1] |
113 (ppowerlevel[idx1] << 8) |
114 (ppowerlevel[idx1] << 16) |
115 (ppowerlevel[idx1] << 24);
116 }
117 if (rtlefuse->eeprom_regulatory == 0) {
118 tmpval = (rtlphy->mcs_txpwrlevel_origoffset
119 [0][6]) +
120 (rtlphy->mcs_txpwrlevel_origoffset
121 [0][7] << 8);
122 tx_agc[RF90_PATH_A] += tmpval;
123 tmpval = (rtlphy->mcs_txpwrlevel_origoffset
124 [0][14]) +
125 (rtlphy->mcs_txpwrlevel_origoffset
126 [0][15] << 24);
127 tx_agc[RF90_PATH_B] += tmpval;
128 }
129 }
130 }
131 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
132 ptr = (u8 *) (&(tx_agc[idx1]));
133 for (idx2 = 0; idx2 < 4; idx2++) {
134 if (*ptr > RF6052_MAX_TX_PWR)
135 *ptr = RF6052_MAX_TX_PWR;
136 ptr++;
137 }
138 }
139 tmpval = tx_agc[RF90_PATH_A] & 0xff;
140 rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
141
142 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
143 ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
144 RTXAGC_A_CCK1_MCS32));
145
146 tmpval = tx_agc[RF90_PATH_A] >> 8;
147 if (mac->mode == WIRELESS_MODE_B)
148 tmpval = tmpval & 0xff00ffff;
149 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
150 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
151 ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
152 RTXAGC_B_CCK11_A_CCK2_11));
153 tmpval = tx_agc[RF90_PATH_B] >> 24;
154 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
155 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
156 ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
157 RTXAGC_B_CCK11_A_CCK2_11));
158 tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
159 rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
160 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
161 ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
162 RTXAGC_B_CCK1_55_MCS32));
163}
164
165static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
166 u8 *ppowerlevel, u8 channel,
167 u32 *ofdmbase, u32 *mcsbase)
168{
169 struct rtl_priv *rtlpriv = rtl_priv(hw);
170 struct rtl_phy *rtlphy = &(rtlpriv->phy);
171 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
172 u32 powerBase0, powerBase1;
173 u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0;
174 u8 i, powerlevel[2];
175
176 for (i = 0; i < 2; i++) {
177 powerlevel[i] = ppowerlevel[i];
178 legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
179 powerBase0 = powerlevel[i] + legacy_pwrdiff;
180 powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
181 (powerBase0 << 8) | powerBase0;
182 *(ofdmbase + i) = powerBase0;
183 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
184 (" [OFDM power base index rf(%c) = 0x%x]\n",
185 ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
186 }
187 for (i = 0; i < 2; i++) {
188 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
189 ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
190 powerlevel[i] += ht20_pwrdiff;
191 }
192 powerBase1 = powerlevel[i];
193 powerBase1 = (powerBase1 << 24) |
194 (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
195 *(mcsbase + i) = powerBase1;
196 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
197 (" [MCS power base index rf(%c) = 0x%x]\n",
198 ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
199 }
200}
201
202static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
203 u8 channel, u8 index,
204 u32 *powerBase0,
205 u32 *powerBase1,
206 u32 *p_outwriteval)
207{
208 struct rtl_priv *rtlpriv = rtl_priv(hw);
209 struct rtl_phy *rtlphy = &(rtlpriv->phy);
210 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
211 u8 i, chnlgroup = 0, pwr_diff_limit[4];
212 u32 writeVal, customer_limit, rf;
213
214 for (rf = 0; rf < 2; rf++) {
215 switch (rtlefuse->eeprom_regulatory) {
216 case 0:
217 chnlgroup = 0;
218 writeVal = rtlphy->mcs_txpwrlevel_origoffset
219 [chnlgroup][index + (rf ? 8 : 0)]
220 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
221 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
222				("RTK better performance, writeVal(%c) = 0x%x\n",
223 ((rf == 0) ? 'A' : 'B'), writeVal));
224 break;
225 case 1:
226 if (rtlphy->pwrgroup_cnt == 1)
227 chnlgroup = 0;
228 if (rtlphy->pwrgroup_cnt >= 3) {
229 if (channel <= 3)
230 chnlgroup = 0;
231 else if (channel >= 4 && channel <= 9)
232 chnlgroup = 1;
233 else if (channel > 9)
234 chnlgroup = 2;
235 if (rtlphy->current_chan_bw ==
236 HT_CHANNEL_WIDTH_20)
237 chnlgroup++;
238 else
239 chnlgroup += 4;
240 }
241 writeVal = rtlphy->mcs_txpwrlevel_origoffset
242 [chnlgroup][index +
243 (rf ? 8 : 0)] +
244 ((index < 2) ? powerBase0[rf] :
245 powerBase1[rf]);
246 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
247 ("Realtek regulatory, 20MHz, "
248 "writeVal(%c) = 0x%x\n",
249 ((rf == 0) ? 'A' : 'B'), writeVal));
250 break;
251 case 2:
252 writeVal = ((index < 2) ? powerBase0[rf] :
253 powerBase1[rf]);
254 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
255				("Better regulatory, writeVal(%c) = 0x%x\n",
256 ((rf == 0) ? 'A' : 'B'), writeVal));
257 break;
258 case 3:
259 chnlgroup = 0;
260 if (rtlphy->current_chan_bw ==
261 HT_CHANNEL_WIDTH_20_40) {
262 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
263					("customer's limit, 40MHz rf(%c) = "
264 "0x%x\n", ((rf == 0) ? 'A' : 'B'),
265 rtlefuse->pwrgroup_ht40[rf]
266 [channel - 1]));
267 } else {
268 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
269 ("customer's limit, 20MHz rf(%c) = "
270 "0x%x\n", ((rf == 0) ? 'A' : 'B'),
271 rtlefuse->pwrgroup_ht20[rf]
272 [channel - 1]));
273 }
274 for (i = 0; i < 4; i++) {
275 pwr_diff_limit[i] =
276 (u8) ((rtlphy->mcs_txpwrlevel_origoffset
277 [chnlgroup][index + (rf ? 8 : 0)]
278 & (0x7f << (i * 8))) >> (i * 8));
279 if (rtlphy->current_chan_bw ==
280 HT_CHANNEL_WIDTH_20_40) {
281 if (pwr_diff_limit[i] >
282 rtlefuse->pwrgroup_ht40[rf]
283 [channel - 1])
284 pwr_diff_limit[i] = rtlefuse->
285 pwrgroup_ht40[rf]
286 [channel - 1];
287 } else {
288 if (pwr_diff_limit[i] >
289 rtlefuse->pwrgroup_ht20[rf]
290 [channel - 1])
291 pwr_diff_limit[i] =
292 rtlefuse->pwrgroup_ht20[rf]
293 [channel - 1];
294 }
295 }
296 customer_limit = (pwr_diff_limit[3] << 24) |
297 (pwr_diff_limit[2] << 16) |
298 (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
299 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
300 ("Customer's limit rf(%c) = 0x%x\n",
301 ((rf == 0) ? 'A' : 'B'), customer_limit));
302 writeVal = customer_limit + ((index < 2) ?
303 powerBase0[rf] : powerBase1[rf]);
304 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
305 ("Customer, writeVal rf(%c)= 0x%x\n",
306 ((rf == 0) ? 'A' : 'B'), writeVal));
307 break;
308 default:
309 chnlgroup = 0;
310 writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
311 [index + (rf ? 8 : 0)] + ((index < 2) ?
312 powerBase0[rf] : powerBase1[rf]);
313 RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
314				 "performance, writeVal rf(%c) = 0x%x\n",
315 ((rf == 0) ? 'A' : 'B'), writeVal));
316 break;
317 }
318 if (rtlpriv->dm.dynamic_txhighpower_lvl ==
319 TXHIGHPWRLEVEL_LEVEL1)
320 writeVal = 0x14141414;
321 else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
322 TXHIGHPWRLEVEL_LEVEL2)
323 writeVal = 0x00000000;
324 if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
325 writeVal = writeVal - 0x06060606;
326 else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
327 TXHIGHPWRLEVEL_BT2)
328			writeVal = writeVal;	/* no power offset for BT2 */
329 *(p_outwriteval + rf) = writeVal;
330 }
331}
332
333static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
334 u8 index, u32 *pValue)
335{
336 struct rtl_priv *rtlpriv = rtl_priv(hw);
337 struct rtl_phy *rtlphy = &(rtlpriv->phy);
338 u16 regoffset_a[6] = {
339 RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
340 RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
341 RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
342 };
343 u16 regoffset_b[6] = {
344 RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
345 RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
346 RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
347 };
348 u8 i, rf, pwr_val[4];
349 u32 writeVal;
350 u16 regoffset;
351
352 for (rf = 0; rf < 2; rf++) {
353 writeVal = pValue[rf];
354 for (i = 0; i < 4; i++) {
355 pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >>
356 (i * 8));
357 if (pwr_val[i] > RF6052_MAX_TX_PWR)
358 pwr_val[i] = RF6052_MAX_TX_PWR;
359 }
360 writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
361 (pwr_val[1] << 8) | pwr_val[0];
362 if (rf == 0)
363 regoffset = regoffset_a[index];
364 else
365 regoffset = regoffset_b[index];
366 rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
367 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
368 ("Set 0x%x = %08x\n", regoffset, writeVal));
369 if (((get_rf_type(rtlphy) == RF_2T2R) &&
370 (regoffset == RTXAGC_A_MCS15_MCS12 ||
371 regoffset == RTXAGC_B_MCS15_MCS12)) ||
372 ((get_rf_type(rtlphy) != RF_2T2R) &&
373 (regoffset == RTXAGC_A_MCS07_MCS04 ||
374 regoffset == RTXAGC_B_MCS07_MCS04))) {
375 writeVal = pwr_val[3];
376 if (regoffset == RTXAGC_A_MCS15_MCS12 ||
377 regoffset == RTXAGC_A_MCS07_MCS04)
378 regoffset = 0xc90;
379 if (regoffset == RTXAGC_B_MCS15_MCS12 ||
380 regoffset == RTXAGC_B_MCS07_MCS04)
381 regoffset = 0xc98;
382 for (i = 0; i < 3; i++) {
383 writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
384 rtl_write_byte(rtlpriv, (u32)(regoffset + i),
385 (u8)writeVal);
386 }
387 }
388 }
389}
390
391void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
392 u8 *ppowerlevel, u8 channel)
393{
394 u32 writeVal[2], powerBase0[2], powerBase1[2];
395 u8 index = 0;
396
397 rtl92c_phy_get_power_base(hw, ppowerlevel,
398 channel, &powerBase0[0], &powerBase1[0]);
399 for (index = 0; index < 6; index++) {
400 _rtl92c_get_txpower_writeval_by_regulatory(hw,
401 channel, index,
402 &powerBase0[0],
403 &powerBase1[0],
404 &writeVal[0]);
405 _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
406 }
407}
408
409bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw)
410{
411 struct rtl_priv *rtlpriv = rtl_priv(hw);
412 struct rtl_phy *rtlphy = &(rtlpriv->phy);
413 bool rtstatus = true;
414 u8 b_reg_hwparafile = 1;
415
416 if (rtlphy->rf_type == RF_1T1R)
417 rtlphy->num_total_rfpath = 1;
418 else
419 rtlphy->num_total_rfpath = 2;
420 if (b_reg_hwparafile == 1)
421 rtstatus = _rtl92c_phy_rf6052_config_parafile(hw);
422 return rtstatus;
423}
424
425static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
426{
427 struct rtl_priv *rtlpriv = rtl_priv(hw);
428 struct rtl_phy *rtlphy = &(rtlpriv->phy);
429 u32 u4_regvalue = 0;
430 u8 rfpath;
431 bool rtstatus = true;
432 struct bb_reg_def *pphyreg;
433
434 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
435 pphyreg = &rtlphy->phyreg_def[rfpath];
436 switch (rfpath) {
437 case RF90_PATH_A:
438 case RF90_PATH_C:
439 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
440 BRFSI_RFENV);
441 break;
442 case RF90_PATH_B:
443 case RF90_PATH_D:
444 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
445 BRFSI_RFENV << 16);
446 break;
447 }
448 rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
449 udelay(1);
450 rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
451 udelay(1);
452 rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
453 B3WIREADDREAALENGTH, 0x0);
454 udelay(1);
455 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
456 udelay(1);
457 switch (rfpath) {
458 case RF90_PATH_A:
459 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
460 (enum radio_path) rfpath);
461 break;
462 case RF90_PATH_B:
463 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
464 (enum radio_path) rfpath);
465 break;
466 case RF90_PATH_C:
467 break;
468 case RF90_PATH_D:
469 break;
470 }
471 switch (rfpath) {
472 case RF90_PATH_A:
473 case RF90_PATH_C:
474 rtl_set_bbreg(hw, pphyreg->rfintfs,
475 BRFSI_RFENV, u4_regvalue);
476 break;
477 case RF90_PATH_B:
478 case RF90_PATH_D:
479 rtl_set_bbreg(hw, pphyreg->rfintfs,
480 BRFSI_RFENV << 16, u4_regvalue);
481 break;
482 }
483 if (rtstatus != true) {
484 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
485 ("Radio[%d] Fail!!", rfpath));
486 goto phy_rf_cfg_fail;
487 }
488 }
489 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
490 return rtstatus;
491phy_rf_cfg_fail:
492 return rtstatus;
493}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
new file mode 100644
index 00000000000..c4ed125ef4d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
@@ -0,0 +1,30 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../rtl8192ce/rf.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
new file mode 100644
index 00000000000..4e937e0da8e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -0,0 +1,327 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../core.h"
32#include "../usb.h"
33#include "../efuse.h"
34#include "reg.h"
35#include "def.h"
36#include "phy.h"
37#include "mac.h"
38#include "dm.h"
39#include "sw.h"
40#include "trx.h"
41#include "led.h"
42#include "hw.h"
43
44
45MODULE_AUTHOR("Georgia <georgia@realtek.com>");
46MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>");
47MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
48MODULE_LICENSE("GPL");
49MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
50MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
51
52static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
53{
54 struct rtl_priv *rtlpriv = rtl_priv(hw);
55
56 rtlpriv->dm.dm_initialgain_enable = 1;
57 rtlpriv->dm.dm_flag = 0;
58 rtlpriv->dm.disable_framebursting = 0;
59 rtlpriv->dm.thermalvalue = 0;
60 rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
61 if (!rtlpriv->rtlhal.pfirmware) {
62 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
63 ("Can't alloc buffer for fw.\n"));
64 return 1;
65 }
66 return 0;
67}
68
69static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
70{
71 struct rtl_priv *rtlpriv = rtl_priv(hw);
72
73 if (rtlpriv->rtlhal.pfirmware) {
74 vfree(rtlpriv->rtlhal.pfirmware);
75 rtlpriv->rtlhal.pfirmware = NULL;
76 }
77}
78
79static struct rtl_hal_ops rtl8192cu_hal_ops = {
80 .init_sw_vars = rtl92cu_init_sw_vars,
81 .deinit_sw_vars = rtl92cu_deinit_sw_vars,
82 .read_chip_version = rtl92c_read_chip_version,
83 .read_eeprom_info = rtl92cu_read_eeprom_info,
84 .enable_interrupt = rtl92c_enable_interrupt,
85 .disable_interrupt = rtl92c_disable_interrupt,
86 .hw_init = rtl92cu_hw_init,
87 .hw_disable = rtl92cu_card_disable,
88 .set_network_type = rtl92cu_set_network_type,
89 .set_chk_bssid = rtl92cu_set_check_bssid,
90 .set_qos = rtl92c_set_qos,
91 .set_bcn_reg = rtl92cu_set_beacon_related_registers,
92 .set_bcn_intv = rtl92cu_set_beacon_interval,
93 .update_interrupt_mask = rtl92cu_update_interrupt_mask,
94 .get_hw_reg = rtl92cu_get_hw_reg,
95 .set_hw_reg = rtl92cu_set_hw_reg,
96 .update_rate_table = rtl92cu_update_hal_rate_table,
97 .update_rate_mask = rtl92cu_update_hal_rate_mask,
98 .fill_tx_desc = rtl92cu_tx_fill_desc,
99 .fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
100 .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
101 .cmd_send_packet = rtl92cu_cmd_send_packet,
102 .query_rx_desc = rtl92cu_rx_query_desc,
103 .set_channel_access = rtl92cu_update_channel_access_setting,
104 .radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking,
105 .set_bw_mode = rtl92c_phy_set_bw_mode,
106 .switch_channel = rtl92c_phy_sw_chnl,
107 .dm_watchdog = rtl92c_dm_watchdog,
108 .scan_operation_backup = rtl92c_phy_scan_operation_backup,
109 .set_rf_power_state = rtl92c_phy_set_rf_power_state,
110 .led_control = rtl92cu_led_control,
111 .enable_hw_sec = rtl92cu_enable_hw_security_config,
112 .set_key = rtl92c_set_key,
113 .init_sw_leds = rtl92cu_init_sw_leds,
114 .deinit_sw_leds = rtl92cu_deinit_sw_leds,
115 .get_bbreg = rtl92c_phy_query_bb_reg,
116 .set_bbreg = rtl92c_phy_set_bb_reg,
117 .get_rfreg = rtl92c_phy_query_rf_reg,
118 .set_rfreg = rtl92c_phy_set_rf_reg,
119};
120
121static struct rtl_mod_params rtl92cu_mod_params = {
122 .sw_crypto = 0,
123};
124
125static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
126 /* rx */
127 .in_ep_num = RTL92C_USB_BULK_IN_NUM,
128 .rx_urb_num = RTL92C_NUM_RX_URBS,
129 .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
130 .usb_rx_hdl = rtl8192cu_rx_hdl,
131 .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */
132 /* tx */
133 .usb_tx_cleanup = rtl8192c_tx_cleanup,
134 .usb_tx_post_hdl = rtl8192c_tx_post_hdl,
135 .usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl,
136 /* endpoint mapping */
137 .usb_endpoint_mapping = rtl8192cu_endpoint_mapping,
138 .usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
139};
140
141static struct rtl_hal_cfg rtl92cu_hal_cfg = {
142 .name = "rtl92c_usb",
143 .fw_name = "rtlwifi/rtl8192cufw.bin",
144 .ops = &rtl8192cu_hal_ops,
145 .mod_params = &rtl92cu_mod_params,
146 .usb_interface_cfg = &rtl92cu_interface_cfg,
147
148 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
149 .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
150 .maps[SYS_CLK] = REG_SYS_CLKR,
151 .maps[MAC_RCR_AM] = AM,
152 .maps[MAC_RCR_AB] = AB,
153 .maps[MAC_RCR_ACRC32] = ACRC32,
154 .maps[MAC_RCR_ACF] = ACF,
155 .maps[MAC_RCR_AAP] = AAP,
156
157 .maps[EFUSE_TEST] = REG_EFUSE_TEST,
158 .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
159 .maps[EFUSE_CLK] = 0,
160 .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
161 .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
162 .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
163 .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
164 .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
165 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
166 .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
167 .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
168
169 .maps[RWCAM] = REG_CAMCMD,
170 .maps[WCAMI] = REG_CAMWRITE,
171 .maps[RCAMO] = REG_CAMREAD,
172 .maps[CAMDBG] = REG_CAMDBG,
173 .maps[SECR] = REG_SECCFG,
174 .maps[SEC_CAM_NONE] = CAM_NONE,
175 .maps[SEC_CAM_WEP40] = CAM_WEP40,
176 .maps[SEC_CAM_TKIP] = CAM_TKIP,
177 .maps[SEC_CAM_AES] = CAM_AES,
178 .maps[SEC_CAM_WEP104] = CAM_WEP104,
179
180 .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
181 .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
182 .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
183 .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
184 .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
185 .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
186 .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
187 .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
188 .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
189 .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
190 .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
191 .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
192 .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
193 .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
194 .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
195 .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
196
197 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
198 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
199 .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
200 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
201 .maps[RTL_IMR_RDU] = IMR_RDU,
202 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
203 .maps[RTL_IMR_BDOK] = IMR_BDOK,
204 .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
205 .maps[RTL_IMR_TBDER] = IMR_TBDER,
206 .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
207 .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
208 .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
209 .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
210 .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
211 .maps[RTL_IMR_VODOK] = IMR_VODOK,
212 .maps[RTL_IMR_ROK] = IMR_ROK,
213 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
214
215 .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
216 .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
217 .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
218 .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
219 .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
220 .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
221 .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
222 .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
223 .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
224 .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
225 .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
226 .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
227 .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
228 .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
229};
230
231#define USB_VENDER_ID_REALTEK 0x0bda
232
233/* 2010-10-19 DID_USB_V3.4 */
234static struct usb_device_id rtl8192c_usb_ids[] = {
235
236 /*=== Realtek demoboard ===*/
237 /* Default ID */
238 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
239
240 /****** 8188CU ********/
241 /* 8188CE-VAU USB minCard */
242 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
243 /* 8188cu 1*1 dongle */
244 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)},
245 /* 8188cu 1*1 dongle, (b/g mode only) */
246 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
247 /* 8188cu Slim Solo */
248 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)},
249 /* 8188cu Slim Combo */
250 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
251 /* 8188RU High-power USB Dongle */
252 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
253 /* 8188CE-VAU USB minCard (b/g mode only) */
254 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
255 /* 8188 Combo for BC4 */
256 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
257
258 /****** 8192CU ********/
259 /* 8191cu 1*2 */
260 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
261 /* 8192cu 2*2 */
262 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
263 /* 8192CE-VAU USB minCard */
264 {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
265
266 /*=== Customer ID ===*/
267 /****** 8188CU ********/
268 {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
269 {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
270 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
271 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
272	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
273 {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
274 /* HP - Lite-On ,8188CUS Slim Combo */
275 {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
276 {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
277 {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
278 {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
279 {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
280 {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
281	{RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
282	/* Russian customer -Azwave (8188CE-VAU b/g mode only) */
283	{RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
284
285 /****** 8192CU ********/
286 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
287 {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
288 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
289 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
290 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
291 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
292 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
293 {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
294 {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
295 {}
296};
297
298MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
299
300static struct usb_driver rtl8192cu_driver = {
301 .name = "rtl8192cu",
302 .probe = rtl_usb_probe,
303 .disconnect = rtl_usb_disconnect,
304 .id_table = rtl8192c_usb_ids,
305
306#ifdef CONFIG_PM
307 /* .suspend = rtl_usb_suspend, */
308 /* .resume = rtl_usb_resume, */
309 /* .reset_resume = rtl8192c_resume, */
310#endif /* CONFIG_PM */
311#ifdef CONFIG_AUTOSUSPEND
312 .supports_autosuspend = 1,
313#endif
314};
315
316static int __init rtl8192cu_init(void)
317{
318 return usb_register(&rtl8192cu_driver);
319}
320
321static void __exit rtl8192cu_exit(void)
322{
323 usb_deregister(&rtl8192cu_driver);
324}
325
326module_init(rtl8192cu_init);
327module_exit(rtl8192cu_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
new file mode 100644
index 00000000000..3b2c6633955
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -0,0 +1,35 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CU_SW_H__
31#define __RTL92CU_SW_H__
32
33#define EFUSE_MAX_SECTION 16
34
35#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
new file mode 100644
index 00000000000..d57ef5e88a9
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c
@@ -0,0 +1,1888 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "table.h"
31
32u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = {
33 0x024, 0x0011800f,
34 0x028, 0x00ffdb83,
35 0x800, 0x80040002,
36 0x804, 0x00000003,
37 0x808, 0x0000fc00,
38 0x80c, 0x0000000a,
39 0x810, 0x10005388,
40 0x814, 0x020c3d10,
41 0x818, 0x02200385,
42 0x81c, 0x00000000,
43 0x820, 0x01000100,
44 0x824, 0x00390004,
45 0x828, 0x01000100,
46 0x82c, 0x00390004,
47 0x830, 0x27272727,
48 0x834, 0x27272727,
49 0x838, 0x27272727,
50 0x83c, 0x27272727,
51 0x840, 0x00010000,
52 0x844, 0x00010000,
53 0x848, 0x27272727,
54 0x84c, 0x27272727,
55 0x850, 0x00000000,
56 0x854, 0x00000000,
57 0x858, 0x569a569a,
58 0x85c, 0x0c1b25a4,
59 0x860, 0x66e60230,
60 0x864, 0x061f0130,
61 0x868, 0x27272727,
62 0x86c, 0x2b2b2b27,
63 0x870, 0x07000700,
64 0x874, 0x22184000,
65 0x878, 0x08080808,
66 0x87c, 0x00000000,
67 0x880, 0xc0083070,
68 0x884, 0x000004d5,
69 0x888, 0x00000000,
70 0x88c, 0xcc0000c0,
71 0x890, 0x00000800,
72 0x894, 0xfffffffe,
73 0x898, 0x40302010,
74 0x89c, 0x00706050,
75 0x900, 0x00000000,
76 0x904, 0x00000023,
77 0x908, 0x00000000,
78 0x90c, 0x81121313,
79 0xa00, 0x00d047c8,
80 0xa04, 0x80ff000c,
81 0xa08, 0x8c838300,
82 0xa0c, 0x2e68120f,
83 0xa10, 0x9500bb78,
84 0xa14, 0x11144028,
85 0xa18, 0x00881117,
86 0xa1c, 0x89140f00,
87 0xa20, 0x1a1b0000,
88 0xa24, 0x090e1317,
89 0xa28, 0x00000204,
90 0xa2c, 0x00d30000,
91 0xa70, 0x101fbf00,
92 0xa74, 0x00000007,
93 0xc00, 0x48071d40,
94 0xc04, 0x03a05633,
95 0xc08, 0x000000e4,
96 0xc0c, 0x6c6c6c6c,
97 0xc10, 0x08800000,
98 0xc14, 0x40000100,
99 0xc18, 0x08800000,
100 0xc1c, 0x40000100,
101 0xc20, 0x00000000,
102 0xc24, 0x00000000,
103 0xc28, 0x00000000,
104 0xc2c, 0x00000000,
105 0xc30, 0x69e9ac44,
106 0xc34, 0x469652cf,
107 0xc38, 0x49795994,
108 0xc3c, 0x0a97971c,
109 0xc40, 0x1f7c403f,
110 0xc44, 0x000100b7,
111 0xc48, 0xec020107,
112 0xc4c, 0x007f037f,
113 0xc50, 0x6954341e,
114 0xc54, 0x43bc0094,
115 0xc58, 0x6954341e,
116 0xc5c, 0x433c0094,
117 0xc60, 0x00000000,
118 0xc64, 0x5116848b,
119 0xc68, 0x47c00bff,
120 0xc6c, 0x00000036,
121 0xc70, 0x2c7f000d,
122 0xc74, 0x0186115b,
123 0xc78, 0x0000001f,
124 0xc7c, 0x00b99612,
125 0xc80, 0x40000100,
126 0xc84, 0x20f60000,
127 0xc88, 0x40000100,
128 0xc8c, 0x20200000,
129 0xc90, 0x00121820,
130 0xc94, 0x00000000,
131 0xc98, 0x00121820,
132 0xc9c, 0x00007f7f,
133 0xca0, 0x00000000,
134 0xca4, 0x00000080,
135 0xca8, 0x00000000,
136 0xcac, 0x00000000,
137 0xcb0, 0x00000000,
138 0xcb4, 0x00000000,
139 0xcb8, 0x00000000,
140 0xcbc, 0x28000000,
141 0xcc0, 0x00000000,
142 0xcc4, 0x00000000,
143 0xcc8, 0x00000000,
144 0xccc, 0x00000000,
145 0xcd0, 0x00000000,
146 0xcd4, 0x00000000,
147 0xcd8, 0x64b22427,
148 0xcdc, 0x00766932,
149 0xce0, 0x00222222,
150 0xce4, 0x00000000,
151 0xce8, 0x37644302,
152 0xcec, 0x2f97d40c,
153 0xd00, 0x00080740,
154 0xd04, 0x00020403,
155 0xd08, 0x0000907f,
156 0xd0c, 0x20010201,
157 0xd10, 0xa0633333,
158 0xd14, 0x3333bc43,
159 0xd18, 0x7a8f5b6b,
160 0xd2c, 0xcc979975,
161 0xd30, 0x00000000,
162 0xd34, 0x80608000,
163 0xd38, 0x00000000,
164 0xd3c, 0x00027293,
165 0xd40, 0x00000000,
166 0xd44, 0x00000000,
167 0xd48, 0x00000000,
168 0xd4c, 0x00000000,
169 0xd50, 0x6437140a,
170 0xd54, 0x00000000,
171 0xd58, 0x00000000,
172 0xd5c, 0x30032064,
173 0xd60, 0x4653de68,
174 0xd64, 0x04518a3c,
175 0xd68, 0x00002101,
176 0xd6c, 0x2a201c16,
177 0xd70, 0x1812362e,
178 0xd74, 0x322c2220,
179 0xd78, 0x000e3c24,
180 0xe00, 0x2a2a2a2a,
181 0xe04, 0x2a2a2a2a,
182 0xe08, 0x03902a2a,
183 0xe10, 0x2a2a2a2a,
184 0xe14, 0x2a2a2a2a,
185 0xe18, 0x2a2a2a2a,
186 0xe1c, 0x2a2a2a2a,
187 0xe28, 0x00000000,
188 0xe30, 0x1000dc1f,
189 0xe34, 0x10008c1f,
190 0xe38, 0x02140102,
191 0xe3c, 0x681604c2,
192 0xe40, 0x01007c00,
193 0xe44, 0x01004800,
194 0xe48, 0xfb000000,
195 0xe4c, 0x000028d1,
196 0xe50, 0x1000dc1f,
197 0xe54, 0x10008c1f,
198 0xe58, 0x02140102,
199 0xe5c, 0x28160d05,
200 0xe60, 0x00000010,
201 0xe68, 0x001b25a4,
202 0xe6c, 0x63db25a4,
203 0xe70, 0x63db25a4,
204 0xe74, 0x0c1b25a4,
205 0xe78, 0x0c1b25a4,
206 0xe7c, 0x0c1b25a4,
207 0xe80, 0x0c1b25a4,
208 0xe84, 0x63db25a4,
209 0xe88, 0x0c1b25a4,
210 0xe8c, 0x63db25a4,
211 0xed0, 0x63db25a4,
212 0xed4, 0x63db25a4,
213 0xed8, 0x63db25a4,
214 0xedc, 0x001b25a4,
215 0xee0, 0x001b25a4,
216 0xeec, 0x6fdb25a4,
217 0xf14, 0x00000003,
218 0xf4c, 0x00000000,
219 0xf00, 0x00000300,
220};
221
222u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = {
223 0x024, 0x0011800f,
224 0x028, 0x00ffdb83,
225 0x800, 0x80040000,
226 0x804, 0x00000001,
227 0x808, 0x0000fc00,
228 0x80c, 0x0000000a,
229 0x810, 0x10005388,
230 0x814, 0x020c3d10,
231 0x818, 0x02200385,
232 0x81c, 0x00000000,
233 0x820, 0x01000100,
234 0x824, 0x00390004,
235 0x828, 0x00000000,
236 0x82c, 0x00000000,
237 0x830, 0x00000000,
238 0x834, 0x00000000,
239 0x838, 0x00000000,
240 0x83c, 0x00000000,
241 0x840, 0x00010000,
242 0x844, 0x00000000,
243 0x848, 0x00000000,
244 0x84c, 0x00000000,
245 0x850, 0x00000000,
246 0x854, 0x00000000,
247 0x858, 0x569a569a,
248 0x85c, 0x001b25a4,
249 0x860, 0x66e60230,
250 0x864, 0x061f0130,
251 0x868, 0x00000000,
252 0x86c, 0x32323200,
253 0x870, 0x07000700,
254 0x874, 0x22004000,
255 0x878, 0x00000808,
256 0x87c, 0x00000000,
257 0x880, 0xc0083070,
258 0x884, 0x000004d5,
259 0x888, 0x00000000,
260 0x88c, 0xccc000c0,
261 0x890, 0x00000800,
262 0x894, 0xfffffffe,
263 0x898, 0x40302010,
264 0x89c, 0x00706050,
265 0x900, 0x00000000,
266 0x904, 0x00000023,
267 0x908, 0x00000000,
268 0x90c, 0x81121111,
269 0xa00, 0x00d047c8,
270 0xa04, 0x80ff000c,
271 0xa08, 0x8c838300,
272 0xa0c, 0x2e68120f,
273 0xa10, 0x9500bb78,
274 0xa14, 0x11144028,
275 0xa18, 0x00881117,
276 0xa1c, 0x89140f00,
277 0xa20, 0x1a1b0000,
278 0xa24, 0x090e1317,
279 0xa28, 0x00000204,
280 0xa2c, 0x00d30000,
281 0xa70, 0x101fbf00,
282 0xa74, 0x00000007,
283 0xc00, 0x48071d40,
284 0xc04, 0x03a05611,
285 0xc08, 0x000000e4,
286 0xc0c, 0x6c6c6c6c,
287 0xc10, 0x08800000,
288 0xc14, 0x40000100,
289 0xc18, 0x08800000,
290 0xc1c, 0x40000100,
291 0xc20, 0x00000000,
292 0xc24, 0x00000000,
293 0xc28, 0x00000000,
294 0xc2c, 0x00000000,
295 0xc30, 0x69e9ac44,
296 0xc34, 0x469652cf,
297 0xc38, 0x49795994,
298 0xc3c, 0x0a97971c,
299 0xc40, 0x1f7c403f,
300 0xc44, 0x000100b7,
301 0xc48, 0xec020107,
302 0xc4c, 0x007f037f,
303 0xc50, 0x6954341e,
304 0xc54, 0x43bc0094,
305 0xc58, 0x6954341e,
306 0xc5c, 0x433c0094,
307 0xc60, 0x00000000,
308 0xc64, 0x5116848b,
309 0xc68, 0x47c00bff,
310 0xc6c, 0x00000036,
311 0xc70, 0x2c7f000d,
312 0xc74, 0x018610db,
313 0xc78, 0x0000001f,
314 0xc7c, 0x00b91612,
315 0xc80, 0x40000100,
316 0xc84, 0x20f60000,
317 0xc88, 0x40000100,
318 0xc8c, 0x20200000,
319 0xc90, 0x00121820,
320 0xc94, 0x00000000,
321 0xc98, 0x00121820,
322 0xc9c, 0x00007f7f,
323 0xca0, 0x00000000,
324 0xca4, 0x00000080,
325 0xca8, 0x00000000,
326 0xcac, 0x00000000,
327 0xcb0, 0x00000000,
328 0xcb4, 0x00000000,
329 0xcb8, 0x00000000,
330 0xcbc, 0x28000000,
331 0xcc0, 0x00000000,
332 0xcc4, 0x00000000,
333 0xcc8, 0x00000000,
334 0xccc, 0x00000000,
335 0xcd0, 0x00000000,
336 0xcd4, 0x00000000,
337 0xcd8, 0x64b22427,
338 0xcdc, 0x00766932,
339 0xce0, 0x00222222,
340 0xce4, 0x00000000,
341 0xce8, 0x37644302,
342 0xcec, 0x2f97d40c,
343 0xd00, 0x00080740,
344 0xd04, 0x00020401,
345 0xd08, 0x0000907f,
346 0xd0c, 0x20010201,
347 0xd10, 0xa0633333,
348 0xd14, 0x3333bc43,
349 0xd18, 0x7a8f5b6b,
350 0xd2c, 0xcc979975,
351 0xd30, 0x00000000,
352 0xd34, 0x80608000,
353 0xd38, 0x00000000,
354 0xd3c, 0x00027293,
355 0xd40, 0x00000000,
356 0xd44, 0x00000000,
357 0xd48, 0x00000000,
358 0xd4c, 0x00000000,
359 0xd50, 0x6437140a,
360 0xd54, 0x00000000,
361 0xd58, 0x00000000,
362 0xd5c, 0x30032064,
363 0xd60, 0x4653de68,
364 0xd64, 0x04518a3c,
365 0xd68, 0x00002101,
366 0xd6c, 0x2a201c16,
367 0xd70, 0x1812362e,
368 0xd74, 0x322c2220,
369 0xd78, 0x000e3c24,
370 0xe00, 0x2a2a2a2a,
371 0xe04, 0x2a2a2a2a,
372 0xe08, 0x03902a2a,
373 0xe10, 0x2a2a2a2a,
374 0xe14, 0x2a2a2a2a,
375 0xe18, 0x2a2a2a2a,
376 0xe1c, 0x2a2a2a2a,
377 0xe28, 0x00000000,
378 0xe30, 0x1000dc1f,
379 0xe34, 0x10008c1f,
380 0xe38, 0x02140102,
381 0xe3c, 0x681604c2,
382 0xe40, 0x01007c00,
383 0xe44, 0x01004800,
384 0xe48, 0xfb000000,
385 0xe4c, 0x000028d1,
386 0xe50, 0x1000dc1f,
387 0xe54, 0x10008c1f,
388 0xe58, 0x02140102,
389 0xe5c, 0x28160d05,
390 0xe60, 0x00000008,
391 0xe68, 0x001b25a4,
392 0xe6c, 0x631b25a0,
393 0xe70, 0x631b25a0,
394 0xe74, 0x081b25a0,
395 0xe78, 0x081b25a0,
396 0xe7c, 0x081b25a0,
397 0xe80, 0x081b25a0,
398 0xe84, 0x631b25a0,
399 0xe88, 0x081b25a0,
400 0xe8c, 0x631b25a0,
401 0xed0, 0x631b25a0,
402 0xed4, 0x631b25a0,
403 0xed8, 0x631b25a0,
404 0xedc, 0x001b25a0,
405 0xee0, 0x001b25a0,
406 0xeec, 0x6b1b25a0,
407 0xf14, 0x00000003,
408 0xf4c, 0x00000000,
409 0xf00, 0x00000300,
410};
411
412u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = {
413 0xe00, 0xffffffff, 0x07090c0c,
414 0xe04, 0xffffffff, 0x01020405,
415 0xe08, 0x0000ff00, 0x00000000,
416 0x86c, 0xffffff00, 0x00000000,
417 0xe10, 0xffffffff, 0x0b0c0c0e,
418 0xe14, 0xffffffff, 0x01030506,
419 0xe18, 0xffffffff, 0x0b0c0d0e,
420 0xe1c, 0xffffffff, 0x01030509,
421 0x830, 0xffffffff, 0x07090c0c,
422 0x834, 0xffffffff, 0x01020405,
423 0x838, 0xffffff00, 0x00000000,
424 0x86c, 0x000000ff, 0x00000000,
425 0x83c, 0xffffffff, 0x0b0c0d0e,
426 0x848, 0xffffffff, 0x01030509,
427 0x84c, 0xffffffff, 0x0b0c0d0e,
428 0x868, 0xffffffff, 0x01030509,
429 0xe00, 0xffffffff, 0x00000000,
430 0xe04, 0xffffffff, 0x00000000,
431 0xe08, 0x0000ff00, 0x00000000,
432 0x86c, 0xffffff00, 0x00000000,
433 0xe10, 0xffffffff, 0x00000000,
434 0xe14, 0xffffffff, 0x00000000,
435 0xe18, 0xffffffff, 0x00000000,
436 0xe1c, 0xffffffff, 0x00000000,
437 0x830, 0xffffffff, 0x00000000,
438 0x834, 0xffffffff, 0x00000000,
439 0x838, 0xffffff00, 0x00000000,
440 0x86c, 0x000000ff, 0x00000000,
441 0x83c, 0xffffffff, 0x00000000,
442 0x848, 0xffffffff, 0x00000000,
443 0x84c, 0xffffffff, 0x00000000,
444 0x868, 0xffffffff, 0x00000000,
445 0xe00, 0xffffffff, 0x04040404,
446 0xe04, 0xffffffff, 0x00020204,
447 0xe08, 0x0000ff00, 0x00000000,
448 0x86c, 0xffffff00, 0x00000000,
449 0xe10, 0xffffffff, 0x06060606,
450 0xe14, 0xffffffff, 0x00020406,
451 0xe18, 0xffffffff, 0x00000000,
452 0xe1c, 0xffffffff, 0x00000000,
453 0x830, 0xffffffff, 0x04040404,
454 0x834, 0xffffffff, 0x00020204,
455 0x838, 0xffffff00, 0x00000000,
456 0x86c, 0x000000ff, 0x00000000,
457 0x83c, 0xffffffff, 0x06060606,
458 0x848, 0xffffffff, 0x00020406,
459 0x84c, 0xffffffff, 0x00000000,
460 0x868, 0xffffffff, 0x00000000,
461 0xe00, 0xffffffff, 0x00000000,
462 0xe04, 0xffffffff, 0x00000000,
463 0xe08, 0x0000ff00, 0x00000000,
464 0x86c, 0xffffff00, 0x00000000,
465 0xe10, 0xffffffff, 0x00000000,
466 0xe14, 0xffffffff, 0x00000000,
467 0xe18, 0xffffffff, 0x00000000,
468 0xe1c, 0xffffffff, 0x00000000,
469 0x830, 0xffffffff, 0x00000000,
470 0x834, 0xffffffff, 0x00000000,
471 0x838, 0xffffff00, 0x00000000,
472 0x86c, 0x000000ff, 0x00000000,
473 0x83c, 0xffffffff, 0x00000000,
474 0x848, 0xffffffff, 0x00000000,
475 0x84c, 0xffffffff, 0x00000000,
476 0x868, 0xffffffff, 0x00000000,
477 0xe00, 0xffffffff, 0x00000000,
478 0xe04, 0xffffffff, 0x00000000,
479 0xe08, 0x0000ff00, 0x00000000,
480 0x86c, 0xffffff00, 0x00000000,
481 0xe10, 0xffffffff, 0x00000000,
482 0xe14, 0xffffffff, 0x00000000,
483 0xe18, 0xffffffff, 0x00000000,
484 0xe1c, 0xffffffff, 0x00000000,
485 0x830, 0xffffffff, 0x00000000,
486 0x834, 0xffffffff, 0x00000000,
487 0x838, 0xffffff00, 0x00000000,
488 0x86c, 0x000000ff, 0x00000000,
489 0x83c, 0xffffffff, 0x00000000,
490 0x848, 0xffffffff, 0x00000000,
491 0x84c, 0xffffffff, 0x00000000,
492 0x868, 0xffffffff, 0x00000000,
493 0xe00, 0xffffffff, 0x04040404,
494 0xe04, 0xffffffff, 0x00020204,
495 0xe08, 0x0000ff00, 0x00000000,
496 0x86c, 0xffffff00, 0x00000000,
497 0xe10, 0xffffffff, 0x00000000,
498 0xe14, 0xffffffff, 0x00000000,
499 0xe18, 0xffffffff, 0x00000000,
500 0xe1c, 0xffffffff, 0x00000000,
501 0x830, 0xffffffff, 0x04040404,
502 0x834, 0xffffffff, 0x00020204,
503 0x838, 0xffffff00, 0x00000000,
504 0x86c, 0x000000ff, 0x00000000,
505 0x83c, 0xffffffff, 0x00000000,
506 0x848, 0xffffffff, 0x00000000,
507 0x84c, 0xffffffff, 0x00000000,
508 0x868, 0xffffffff, 0x00000000,
509 0xe00, 0xffffffff, 0x00000000,
510 0xe04, 0xffffffff, 0x00000000,
511 0xe08, 0x0000ff00, 0x00000000,
512 0x86c, 0xffffff00, 0x00000000,
513 0xe10, 0xffffffff, 0x00000000,
514 0xe14, 0xffffffff, 0x00000000,
515 0xe18, 0xffffffff, 0x00000000,
516 0xe1c, 0xffffffff, 0x00000000,
517 0x830, 0xffffffff, 0x00000000,
518 0x834, 0xffffffff, 0x00000000,
519 0x838, 0xffffff00, 0x00000000,
520 0x86c, 0x000000ff, 0x00000000,
521 0x83c, 0xffffffff, 0x00000000,
522 0x848, 0xffffffff, 0x00000000,
523 0x84c, 0xffffffff, 0x00000000,
524 0x868, 0xffffffff, 0x00000000,
525};
526
527u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = {
528 0x000, 0x00030159,
529 0x001, 0x00031284,
530 0x002, 0x00098000,
531 0x003, 0x00018c63,
532 0x004, 0x000210e7,
533 0x009, 0x0002044f,
534 0x00a, 0x0001adb1,
535 0x00b, 0x00054867,
536 0x00c, 0x0008992e,
537 0x00d, 0x0000e52c,
538 0x00e, 0x00039ce7,
539 0x00f, 0x00000451,
540 0x019, 0x00000000,
541 0x01a, 0x00010255,
542 0x01b, 0x00060a00,
543 0x01c, 0x000fc378,
544 0x01d, 0x000a1250,
545 0x01e, 0x0004445f,
546 0x01f, 0x00080001,
547 0x020, 0x0000b614,
548 0x021, 0x0006c000,
549 0x022, 0x00000000,
550 0x023, 0x00001558,
551 0x024, 0x00000060,
552 0x025, 0x00000483,
553 0x026, 0x0004f000,
554 0x027, 0x000ec7d9,
555 0x028, 0x000577c0,
556 0x029, 0x00004783,
557 0x02a, 0x00000001,
558 0x02b, 0x00021334,
559 0x02a, 0x00000000,
560 0x02b, 0x00000054,
561 0x02a, 0x00000001,
562 0x02b, 0x00000808,
563 0x02b, 0x00053333,
564 0x02c, 0x0000000c,
565 0x02a, 0x00000002,
566 0x02b, 0x00000808,
567 0x02b, 0x0005b333,
568 0x02c, 0x0000000d,
569 0x02a, 0x00000003,
570 0x02b, 0x00000808,
571 0x02b, 0x00063333,
572 0x02c, 0x0000000d,
573 0x02a, 0x00000004,
574 0x02b, 0x00000808,
575 0x02b, 0x0006b333,
576 0x02c, 0x0000000d,
577 0x02a, 0x00000005,
578 0x02b, 0x00000808,
579 0x02b, 0x00073333,
580 0x02c, 0x0000000d,
581 0x02a, 0x00000006,
582 0x02b, 0x00000709,
583 0x02b, 0x0005b333,
584 0x02c, 0x0000000d,
585 0x02a, 0x00000007,
586 0x02b, 0x00000709,
587 0x02b, 0x00063333,
588 0x02c, 0x0000000d,
589 0x02a, 0x00000008,
590 0x02b, 0x0000060a,
591 0x02b, 0x0004b333,
592 0x02c, 0x0000000d,
593 0x02a, 0x00000009,
594 0x02b, 0x0000060a,
595 0x02b, 0x00053333,
596 0x02c, 0x0000000d,
597 0x02a, 0x0000000a,
598 0x02b, 0x0000060a,
599 0x02b, 0x0005b333,
600 0x02c, 0x0000000d,
601 0x02a, 0x0000000b,
602 0x02b, 0x0000060a,
603 0x02b, 0x00063333,
604 0x02c, 0x0000000d,
605 0x02a, 0x0000000c,
606 0x02b, 0x0000060a,
607 0x02b, 0x0006b333,
608 0x02c, 0x0000000d,
609 0x02a, 0x0000000d,
610 0x02b, 0x0000060a,
611 0x02b, 0x00073333,
612 0x02c, 0x0000000d,
613 0x02a, 0x0000000e,
614 0x02b, 0x0000050b,
615 0x02b, 0x00066666,
616 0x02c, 0x0000001a,
617 0x02a, 0x000e0000,
618 0x010, 0x0004000f,
619 0x011, 0x000e31fc,
620 0x010, 0x0006000f,
621 0x011, 0x000ff9f8,
622 0x010, 0x0002000f,
623 0x011, 0x000203f9,
624 0x010, 0x0003000f,
625 0x011, 0x000ff500,
626 0x010, 0x00000000,
627 0x011, 0x00000000,
628 0x010, 0x0008000f,
629 0x011, 0x0003f100,
630 0x010, 0x0009000f,
631 0x011, 0x00023100,
632 0x012, 0x00032000,
633 0x012, 0x00071000,
634 0x012, 0x000b0000,
635 0x012, 0x000fc000,
636 0x013, 0x000287af,
637 0x013, 0x000244b7,
638 0x013, 0x000204ab,
639 0x013, 0x0001c49f,
640 0x013, 0x00018493,
641 0x013, 0x00014297,
642 0x013, 0x00010295,
643 0x013, 0x0000c298,
644 0x013, 0x0000819c,
645 0x013, 0x000040a8,
646 0x013, 0x0000001c,
647 0x014, 0x0001944c,
648 0x014, 0x00059444,
649 0x014, 0x0009944c,
650 0x014, 0x000d9444,
651 0x015, 0x0000f424,
652 0x015, 0x0004f424,
653 0x015, 0x0008f424,
654 0x015, 0x000cf424,
655 0x016, 0x000e0330,
656 0x016, 0x000a0330,
657 0x016, 0x00060330,
658 0x016, 0x00020330,
659 0x000, 0x00010159,
660 0x018, 0x0000f401,
661 0x0fe, 0x00000000,
662 0x0fe, 0x00000000,
663 0x01f, 0x00080003,
664 0x0fe, 0x00000000,
665 0x0fe, 0x00000000,
666 0x01e, 0x00044457,
667 0x01f, 0x00080000,
668 0x000, 0x00030159,
669};
670
671u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = {
672 0x000, 0x00030159,
673 0x001, 0x00031284,
674 0x002, 0x00098000,
675 0x003, 0x00018c63,
676 0x004, 0x000210e7,
677 0x009, 0x0002044f,
678 0x00a, 0x0001adb1,
679 0x00b, 0x00054867,
680 0x00c, 0x0008992e,
681 0x00d, 0x0000e52c,
682 0x00e, 0x00039ce7,
683 0x00f, 0x00000451,
684 0x012, 0x00032000,
685 0x012, 0x00071000,
686 0x012, 0x000b0000,
687 0x012, 0x000fc000,
688 0x013, 0x000287af,
689 0x013, 0x000244b7,
690 0x013, 0x000204ab,
691 0x013, 0x0001c49f,
692 0x013, 0x00018493,
693 0x013, 0x00014297,
694 0x013, 0x00010295,
695 0x013, 0x0000c298,
696 0x013, 0x0000819c,
697 0x013, 0x000040a8,
698 0x013, 0x0000001c,
699 0x014, 0x0001944c,
700 0x014, 0x00059444,
701 0x014, 0x0009944c,
702 0x014, 0x000d9444,
703 0x015, 0x0000f424,
704 0x015, 0x0004f424,
705 0x015, 0x0008f424,
706 0x015, 0x000cf424,
707 0x016, 0x000e0330,
708 0x016, 0x000a0330,
709 0x016, 0x00060330,
710 0x016, 0x00020330,
711};
712
713u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = {
714 0x000, 0x00030159,
715 0x001, 0x00031284,
716 0x002, 0x00098000,
717 0x003, 0x00018c63,
718 0x004, 0x000210e7,
719 0x009, 0x0002044f,
720 0x00a, 0x0001adb1,
721 0x00b, 0x00054867,
722 0x00c, 0x0008992e,
723 0x00d, 0x0000e52c,
724 0x00e, 0x00039ce7,
725 0x00f, 0x00000451,
726 0x019, 0x00000000,
727 0x01a, 0x00010255,
728 0x01b, 0x00060a00,
729 0x01c, 0x000fc378,
730 0x01d, 0x000a1250,
731 0x01e, 0x0004445f,
732 0x01f, 0x00080001,
733 0x020, 0x0000b614,
734 0x021, 0x0006c000,
735 0x022, 0x00000000,
736 0x023, 0x00001558,
737 0x024, 0x00000060,
738 0x025, 0x00000483,
739 0x026, 0x0004f000,
740 0x027, 0x000ec7d9,
741 0x028, 0x000577c0,
742 0x029, 0x00004783,
743 0x02a, 0x00000001,
744 0x02b, 0x00021334,
745 0x02a, 0x00000000,
746 0x02b, 0x00000054,
747 0x02a, 0x00000001,
748 0x02b, 0x00000808,
749 0x02b, 0x00053333,
750 0x02c, 0x0000000c,
751 0x02a, 0x00000002,
752 0x02b, 0x00000808,
753 0x02b, 0x0005b333,
754 0x02c, 0x0000000d,
755 0x02a, 0x00000003,
756 0x02b, 0x00000808,
757 0x02b, 0x00063333,
758 0x02c, 0x0000000d,
759 0x02a, 0x00000004,
760 0x02b, 0x00000808,
761 0x02b, 0x0006b333,
762 0x02c, 0x0000000d,
763 0x02a, 0x00000005,
764 0x02b, 0x00000808,
765 0x02b, 0x00073333,
766 0x02c, 0x0000000d,
767 0x02a, 0x00000006,
768 0x02b, 0x00000709,
769 0x02b, 0x0005b333,
770 0x02c, 0x0000000d,
771 0x02a, 0x00000007,
772 0x02b, 0x00000709,
773 0x02b, 0x00063333,
774 0x02c, 0x0000000d,
775 0x02a, 0x00000008,
776 0x02b, 0x0000060a,
777 0x02b, 0x0004b333,
778 0x02c, 0x0000000d,
779 0x02a, 0x00000009,
780 0x02b, 0x0000060a,
781 0x02b, 0x00053333,
782 0x02c, 0x0000000d,
783 0x02a, 0x0000000a,
784 0x02b, 0x0000060a,
785 0x02b, 0x0005b333,
786 0x02c, 0x0000000d,
787 0x02a, 0x0000000b,
788 0x02b, 0x0000060a,
789 0x02b, 0x00063333,
790 0x02c, 0x0000000d,
791 0x02a, 0x0000000c,
792 0x02b, 0x0000060a,
793 0x02b, 0x0006b333,
794 0x02c, 0x0000000d,
795 0x02a, 0x0000000d,
796 0x02b, 0x0000060a,
797 0x02b, 0x00073333,
798 0x02c, 0x0000000d,
799 0x02a, 0x0000000e,
800 0x02b, 0x0000050b,
801 0x02b, 0x00066666,
802 0x02c, 0x0000001a,
803 0x02a, 0x000e0000,
804 0x010, 0x0004000f,
805 0x011, 0x000e31fc,
806 0x010, 0x0006000f,
807 0x011, 0x000ff9f8,
808 0x010, 0x0002000f,
809 0x011, 0x000203f9,
810 0x010, 0x0003000f,
811 0x011, 0x000ff500,
812 0x010, 0x00000000,
813 0x011, 0x00000000,
814 0x010, 0x0008000f,
815 0x011, 0x0003f100,
816 0x010, 0x0009000f,
817 0x011, 0x00023100,
818 0x012, 0x00032000,
819 0x012, 0x00071000,
820 0x012, 0x000b0000,
821 0x012, 0x000fc000,
822 0x013, 0x000287b3,
823 0x013, 0x000244b7,
824 0x013, 0x000204ab,
825 0x013, 0x0001c49f,
826 0x013, 0x00018493,
827 0x013, 0x0001429b,
828 0x013, 0x00010299,
829 0x013, 0x0000c29c,
830 0x013, 0x000081a0,
831 0x013, 0x000040ac,
832 0x013, 0x00000020,
833 0x014, 0x0001944c,
834 0x014, 0x00059444,
835 0x014, 0x0009944c,
836 0x014, 0x000d9444,
837 0x015, 0x0000f405,
838 0x015, 0x0004f405,
839 0x015, 0x0008f405,
840 0x015, 0x000cf405,
841 0x016, 0x000e0330,
842 0x016, 0x000a0330,
843 0x016, 0x00060330,
844 0x016, 0x00020330,
845 0x000, 0x00010159,
846 0x018, 0x0000f401,
847 0x0fe, 0x00000000,
848 0x0fe, 0x00000000,
849 0x01f, 0x00080003,
850 0x0fe, 0x00000000,
851 0x0fe, 0x00000000,
852 0x01e, 0x00044457,
853 0x01f, 0x00080000,
854 0x000, 0x00030159,
855};
856
857u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = {
858 0x0,
859};
860
861u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = {
862 0x420, 0x00000080,
863 0x423, 0x00000000,
864 0x430, 0x00000000,
865 0x431, 0x00000000,
866 0x432, 0x00000000,
867 0x433, 0x00000001,
868 0x434, 0x00000004,
869 0x435, 0x00000005,
870 0x436, 0x00000006,
871 0x437, 0x00000007,
872 0x438, 0x00000000,
873 0x439, 0x00000000,
874 0x43a, 0x00000000,
875 0x43b, 0x00000001,
876 0x43c, 0x00000004,
877 0x43d, 0x00000005,
878 0x43e, 0x00000006,
879 0x43f, 0x00000007,
880 0x440, 0x0000005d,
881 0x441, 0x00000001,
882 0x442, 0x00000000,
883 0x444, 0x00000015,
884 0x445, 0x000000f0,
885 0x446, 0x0000000f,
886 0x447, 0x00000000,
887 0x458, 0x00000041,
888 0x459, 0x000000a8,
889 0x45a, 0x00000072,
890 0x45b, 0x000000b9,
891 0x460, 0x00000066,
892 0x461, 0x00000066,
893 0x462, 0x00000008,
894 0x463, 0x00000003,
895 0x4c8, 0x000000ff,
896 0x4c9, 0x00000008,
897 0x4cc, 0x000000ff,
898 0x4cd, 0x000000ff,
899 0x4ce, 0x00000001,
900 0x500, 0x00000026,
901 0x501, 0x000000a2,
902 0x502, 0x0000002f,
903 0x503, 0x00000000,
904 0x504, 0x00000028,
905 0x505, 0x000000a3,
906 0x506, 0x0000005e,
907 0x507, 0x00000000,
908 0x508, 0x0000002b,
909 0x509, 0x000000a4,
910 0x50a, 0x0000005e,
911 0x50b, 0x00000000,
912 0x50c, 0x0000004f,
913 0x50d, 0x000000a4,
914 0x50e, 0x00000000,
915 0x50f, 0x00000000,
916 0x512, 0x0000001c,
917 0x514, 0x0000000a,
918 0x515, 0x00000010,
919 0x516, 0x0000000a,
920 0x517, 0x00000010,
921 0x51a, 0x00000016,
922 0x524, 0x0000000f,
923 0x525, 0x0000004f,
924 0x546, 0x00000040,
925 0x547, 0x00000000,
926 0x550, 0x00000010,
927 0x551, 0x00000010,
928 0x559, 0x00000002,
929 0x55a, 0x00000002,
930 0x55d, 0x000000ff,
931 0x605, 0x00000030,
932 0x608, 0x0000000e,
933 0x609, 0x0000002a,
934 0x652, 0x00000020,
935 0x63c, 0x0000000a,
936 0x63d, 0x0000000e,
937 0x63e, 0x0000000a,
938 0x63f, 0x0000000e,
939 0x66e, 0x00000005,
940 0x700, 0x00000021,
941 0x701, 0x00000043,
942 0x702, 0x00000065,
943 0x703, 0x00000087,
944 0x708, 0x00000021,
945 0x709, 0x00000043,
946 0x70a, 0x00000065,
947 0x70b, 0x00000087,
948};
949
950u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = {
951 0xc78, 0x7b000001,
952 0xc78, 0x7b010001,
953 0xc78, 0x7b020001,
954 0xc78, 0x7b030001,
955 0xc78, 0x7b040001,
956 0xc78, 0x7b050001,
957 0xc78, 0x7a060001,
958 0xc78, 0x79070001,
959 0xc78, 0x78080001,
960 0xc78, 0x77090001,
961 0xc78, 0x760a0001,
962 0xc78, 0x750b0001,
963 0xc78, 0x740c0001,
964 0xc78, 0x730d0001,
965 0xc78, 0x720e0001,
966 0xc78, 0x710f0001,
967 0xc78, 0x70100001,
968 0xc78, 0x6f110001,
969 0xc78, 0x6e120001,
970 0xc78, 0x6d130001,
971 0xc78, 0x6c140001,
972 0xc78, 0x6b150001,
973 0xc78, 0x6a160001,
974 0xc78, 0x69170001,
975 0xc78, 0x68180001,
976 0xc78, 0x67190001,
977 0xc78, 0x661a0001,
978 0xc78, 0x651b0001,
979 0xc78, 0x641c0001,
980 0xc78, 0x631d0001,
981 0xc78, 0x621e0001,
982 0xc78, 0x611f0001,
983 0xc78, 0x60200001,
984 0xc78, 0x49210001,
985 0xc78, 0x48220001,
986 0xc78, 0x47230001,
987 0xc78, 0x46240001,
988 0xc78, 0x45250001,
989 0xc78, 0x44260001,
990 0xc78, 0x43270001,
991 0xc78, 0x42280001,
992 0xc78, 0x41290001,
993 0xc78, 0x402a0001,
994 0xc78, 0x262b0001,
995 0xc78, 0x252c0001,
996 0xc78, 0x242d0001,
997 0xc78, 0x232e0001,
998 0xc78, 0x222f0001,
999 0xc78, 0x21300001,
1000 0xc78, 0x20310001,
1001 0xc78, 0x06320001,
1002 0xc78, 0x05330001,
1003 0xc78, 0x04340001,
1004 0xc78, 0x03350001,
1005 0xc78, 0x02360001,
1006 0xc78, 0x01370001,
1007 0xc78, 0x00380001,
1008 0xc78, 0x00390001,
1009 0xc78, 0x003a0001,
1010 0xc78, 0x003b0001,
1011 0xc78, 0x003c0001,
1012 0xc78, 0x003d0001,
1013 0xc78, 0x003e0001,
1014 0xc78, 0x003f0001,
1015 0xc78, 0x7b400001,
1016 0xc78, 0x7b410001,
1017 0xc78, 0x7b420001,
1018 0xc78, 0x7b430001,
1019 0xc78, 0x7b440001,
1020 0xc78, 0x7b450001,
1021 0xc78, 0x7a460001,
1022 0xc78, 0x79470001,
1023 0xc78, 0x78480001,
1024 0xc78, 0x77490001,
1025 0xc78, 0x764a0001,
1026 0xc78, 0x754b0001,
1027 0xc78, 0x744c0001,
1028 0xc78, 0x734d0001,
1029 0xc78, 0x724e0001,
1030 0xc78, 0x714f0001,
1031 0xc78, 0x70500001,
1032 0xc78, 0x6f510001,
1033 0xc78, 0x6e520001,
1034 0xc78, 0x6d530001,
1035 0xc78, 0x6c540001,
1036 0xc78, 0x6b550001,
1037 0xc78, 0x6a560001,
1038 0xc78, 0x69570001,
1039 0xc78, 0x68580001,
1040 0xc78, 0x67590001,
1041 0xc78, 0x665a0001,
1042 0xc78, 0x655b0001,
1043 0xc78, 0x645c0001,
1044 0xc78, 0x635d0001,
1045 0xc78, 0x625e0001,
1046 0xc78, 0x615f0001,
1047 0xc78, 0x60600001,
1048 0xc78, 0x49610001,
1049 0xc78, 0x48620001,
1050 0xc78, 0x47630001,
1051 0xc78, 0x46640001,
1052 0xc78, 0x45650001,
1053 0xc78, 0x44660001,
1054 0xc78, 0x43670001,
1055 0xc78, 0x42680001,
1056 0xc78, 0x41690001,
1057 0xc78, 0x406a0001,
1058 0xc78, 0x266b0001,
1059 0xc78, 0x256c0001,
1060 0xc78, 0x246d0001,
1061 0xc78, 0x236e0001,
1062 0xc78, 0x226f0001,
1063 0xc78, 0x21700001,
1064 0xc78, 0x20710001,
1065 0xc78, 0x06720001,
1066 0xc78, 0x05730001,
1067 0xc78, 0x04740001,
1068 0xc78, 0x03750001,
1069 0xc78, 0x02760001,
1070 0xc78, 0x01770001,
1071 0xc78, 0x00780001,
1072 0xc78, 0x00790001,
1073 0xc78, 0x007a0001,
1074 0xc78, 0x007b0001,
1075 0xc78, 0x007c0001,
1076 0xc78, 0x007d0001,
1077 0xc78, 0x007e0001,
1078 0xc78, 0x007f0001,
1079 0xc78, 0x3800001e,
1080 0xc78, 0x3801001e,
1081 0xc78, 0x3802001e,
1082 0xc78, 0x3803001e,
1083 0xc78, 0x3804001e,
1084 0xc78, 0x3805001e,
1085 0xc78, 0x3806001e,
1086 0xc78, 0x3807001e,
1087 0xc78, 0x3808001e,
1088 0xc78, 0x3c09001e,
1089 0xc78, 0x3e0a001e,
1090 0xc78, 0x400b001e,
1091 0xc78, 0x440c001e,
1092 0xc78, 0x480d001e,
1093 0xc78, 0x4c0e001e,
1094 0xc78, 0x500f001e,
1095 0xc78, 0x5210001e,
1096 0xc78, 0x5611001e,
1097 0xc78, 0x5a12001e,
1098 0xc78, 0x5e13001e,
1099 0xc78, 0x6014001e,
1100 0xc78, 0x6015001e,
1101 0xc78, 0x6016001e,
1102 0xc78, 0x6217001e,
1103 0xc78, 0x6218001e,
1104 0xc78, 0x6219001e,
1105 0xc78, 0x621a001e,
1106 0xc78, 0x621b001e,
1107 0xc78, 0x621c001e,
1108 0xc78, 0x621d001e,
1109 0xc78, 0x621e001e,
1110 0xc78, 0x621f001e,
1111};
1112
1113u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = {
1114 0xc78, 0x7b000001,
1115 0xc78, 0x7b010001,
1116 0xc78, 0x7b020001,
1117 0xc78, 0x7b030001,
1118 0xc78, 0x7b040001,
1119 0xc78, 0x7b050001,
1120 0xc78, 0x7a060001,
1121 0xc78, 0x79070001,
1122 0xc78, 0x78080001,
1123 0xc78, 0x77090001,
1124 0xc78, 0x760a0001,
1125 0xc78, 0x750b0001,
1126 0xc78, 0x740c0001,
1127 0xc78, 0x730d0001,
1128 0xc78, 0x720e0001,
1129 0xc78, 0x710f0001,
1130 0xc78, 0x70100001,
1131 0xc78, 0x6f110001,
1132 0xc78, 0x6e120001,
1133 0xc78, 0x6d130001,
1134 0xc78, 0x6c140001,
1135 0xc78, 0x6b150001,
1136 0xc78, 0x6a160001,
1137 0xc78, 0x69170001,
1138 0xc78, 0x68180001,
1139 0xc78, 0x67190001,
1140 0xc78, 0x661a0001,
1141 0xc78, 0x651b0001,
1142 0xc78, 0x641c0001,
1143 0xc78, 0x631d0001,
1144 0xc78, 0x621e0001,
1145 0xc78, 0x611f0001,
1146 0xc78, 0x60200001,
1147 0xc78, 0x49210001,
1148 0xc78, 0x48220001,
1149 0xc78, 0x47230001,
1150 0xc78, 0x46240001,
1151 0xc78, 0x45250001,
1152 0xc78, 0x44260001,
1153 0xc78, 0x43270001,
1154 0xc78, 0x42280001,
1155 0xc78, 0x41290001,
1156 0xc78, 0x402a0001,
1157 0xc78, 0x262b0001,
1158 0xc78, 0x252c0001,
1159 0xc78, 0x242d0001,
1160 0xc78, 0x232e0001,
1161 0xc78, 0x222f0001,
1162 0xc78, 0x21300001,
1163 0xc78, 0x20310001,
1164 0xc78, 0x06320001,
1165 0xc78, 0x05330001,
1166 0xc78, 0x04340001,
1167 0xc78, 0x03350001,
1168 0xc78, 0x02360001,
1169 0xc78, 0x01370001,
1170 0xc78, 0x00380001,
1171 0xc78, 0x00390001,
1172 0xc78, 0x003a0001,
1173 0xc78, 0x003b0001,
1174 0xc78, 0x003c0001,
1175 0xc78, 0x003d0001,
1176 0xc78, 0x003e0001,
1177 0xc78, 0x003f0001,
1178 0xc78, 0x7b400001,
1179 0xc78, 0x7b410001,
1180 0xc78, 0x7b420001,
1181 0xc78, 0x7b430001,
1182 0xc78, 0x7b440001,
1183 0xc78, 0x7b450001,
1184 0xc78, 0x7a460001,
1185 0xc78, 0x79470001,
1186 0xc78, 0x78480001,
1187 0xc78, 0x77490001,
1188 0xc78, 0x764a0001,
1189 0xc78, 0x754b0001,
1190 0xc78, 0x744c0001,
1191 0xc78, 0x734d0001,
1192 0xc78, 0x724e0001,
1193 0xc78, 0x714f0001,
1194 0xc78, 0x70500001,
1195 0xc78, 0x6f510001,
1196 0xc78, 0x6e520001,
1197 0xc78, 0x6d530001,
1198 0xc78, 0x6c540001,
1199 0xc78, 0x6b550001,
1200 0xc78, 0x6a560001,
1201 0xc78, 0x69570001,
1202 0xc78, 0x68580001,
1203 0xc78, 0x67590001,
1204 0xc78, 0x665a0001,
1205 0xc78, 0x655b0001,
1206 0xc78, 0x645c0001,
1207 0xc78, 0x635d0001,
1208 0xc78, 0x625e0001,
1209 0xc78, 0x615f0001,
1210 0xc78, 0x60600001,
1211 0xc78, 0x49610001,
1212 0xc78, 0x48620001,
1213 0xc78, 0x47630001,
1214 0xc78, 0x46640001,
1215 0xc78, 0x45650001,
1216 0xc78, 0x44660001,
1217 0xc78, 0x43670001,
1218 0xc78, 0x42680001,
1219 0xc78, 0x41690001,
1220 0xc78, 0x406a0001,
1221 0xc78, 0x266b0001,
1222 0xc78, 0x256c0001,
1223 0xc78, 0x246d0001,
1224 0xc78, 0x236e0001,
1225 0xc78, 0x226f0001,
1226 0xc78, 0x21700001,
1227 0xc78, 0x20710001,
1228 0xc78, 0x06720001,
1229 0xc78, 0x05730001,
1230 0xc78, 0x04740001,
1231 0xc78, 0x03750001,
1232 0xc78, 0x02760001,
1233 0xc78, 0x01770001,
1234 0xc78, 0x00780001,
1235 0xc78, 0x00790001,
1236 0xc78, 0x007a0001,
1237 0xc78, 0x007b0001,
1238 0xc78, 0x007c0001,
1239 0xc78, 0x007d0001,
1240 0xc78, 0x007e0001,
1241 0xc78, 0x007f0001,
1242 0xc78, 0x3800001e,
1243 0xc78, 0x3801001e,
1244 0xc78, 0x3802001e,
1245 0xc78, 0x3803001e,
1246 0xc78, 0x3804001e,
1247 0xc78, 0x3805001e,
1248 0xc78, 0x3806001e,
1249 0xc78, 0x3807001e,
1250 0xc78, 0x3808001e,
1251 0xc78, 0x3c09001e,
1252 0xc78, 0x3e0a001e,
1253 0xc78, 0x400b001e,
1254 0xc78, 0x440c001e,
1255 0xc78, 0x480d001e,
1256 0xc78, 0x4c0e001e,
1257 0xc78, 0x500f001e,
1258 0xc78, 0x5210001e,
1259 0xc78, 0x5611001e,
1260 0xc78, 0x5a12001e,
1261 0xc78, 0x5e13001e,
1262 0xc78, 0x6014001e,
1263 0xc78, 0x6015001e,
1264 0xc78, 0x6016001e,
1265 0xc78, 0x6217001e,
1266 0xc78, 0x6218001e,
1267 0xc78, 0x6219001e,
1268 0xc78, 0x621a001e,
1269 0xc78, 0x621b001e,
1270 0xc78, 0x621c001e,
1271 0xc78, 0x621d001e,
1272 0xc78, 0x621e001e,
1273 0xc78, 0x621f001e,
1274};
1275
1276u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = {
1277 0x024, 0x0011800f,
1278 0x028, 0x00ffdb83,
1279 0x040, 0x000c0004,
1280 0x800, 0x80040000,
1281 0x804, 0x00000001,
1282 0x808, 0x0000fc00,
1283 0x80c, 0x0000000a,
1284 0x810, 0x10005388,
1285 0x814, 0x020c3d10,
1286 0x818, 0x02200385,
1287 0x81c, 0x00000000,
1288 0x820, 0x01000100,
1289 0x824, 0x00390204,
1290 0x828, 0x00000000,
1291 0x82c, 0x00000000,
1292 0x830, 0x00000000,
1293 0x834, 0x00000000,
1294 0x838, 0x00000000,
1295 0x83c, 0x00000000,
1296 0x840, 0x00010000,
1297 0x844, 0x00000000,
1298 0x848, 0x00000000,
1299 0x84c, 0x00000000,
1300 0x850, 0x00000000,
1301 0x854, 0x00000000,
1302 0x858, 0x569a569a,
1303 0x85c, 0x001b25a4,
1304 0x860, 0x66e60230,
1305 0x864, 0x061f0130,
1306 0x868, 0x00000000,
1307 0x86c, 0x20202000,
1308 0x870, 0x03000300,
1309 0x874, 0x22004000,
1310 0x878, 0x00000808,
1311 0x87c, 0x00ffc3f1,
1312 0x880, 0xc0083070,
1313 0x884, 0x000004d5,
1314 0x888, 0x00000000,
1315 0x88c, 0xccc000c0,
1316 0x890, 0x00000800,
1317 0x894, 0xfffffffe,
1318 0x898, 0x40302010,
1319 0x89c, 0x00706050,
1320 0x900, 0x00000000,
1321 0x904, 0x00000023,
1322 0x908, 0x00000000,
1323 0x90c, 0x81121111,
1324 0xa00, 0x00d047c8,
1325 0xa04, 0x80ff000c,
1326 0xa08, 0x8c838300,
1327 0xa0c, 0x2e68120f,
1328 0xa10, 0x9500bb78,
1329 0xa14, 0x11144028,
1330 0xa18, 0x00881117,
1331 0xa1c, 0x89140f00,
1332 0xa20, 0x15160000,
1333 0xa24, 0x070b0f12,
1334 0xa28, 0x00000104,
1335 0xa2c, 0x00d30000,
1336 0xa70, 0x101fbf00,
1337 0xa74, 0x00000007,
1338 0xc00, 0x48071d40,
1339 0xc04, 0x03a05611,
1340 0xc08, 0x000000e4,
1341 0xc0c, 0x6c6c6c6c,
1342 0xc10, 0x08800000,
1343 0xc14, 0x40000100,
1344 0xc18, 0x08800000,
1345 0xc1c, 0x40000100,
1346 0xc20, 0x00000000,
1347 0xc24, 0x00000000,
1348 0xc28, 0x00000000,
1349 0xc2c, 0x00000000,
1350 0xc30, 0x69e9ac44,
1351 0xc34, 0x469652cf,
1352 0xc38, 0x49795994,
1353 0xc3c, 0x0a97971c,
1354 0xc40, 0x1f7c403f,
1355 0xc44, 0x000100b7,
1356 0xc48, 0xec020107,
1357 0xc4c, 0x007f037f,
1358 0xc50, 0x6954342e,
1359 0xc54, 0x43bc0094,
1360 0xc58, 0x6954342f,
1361 0xc5c, 0x433c0094,
1362 0xc60, 0x00000000,
1363 0xc64, 0x5116848b,
1364 0xc68, 0x47c00bff,
1365 0xc6c, 0x00000036,
1366 0xc70, 0x2c46000d,
1367 0xc74, 0x018610db,
1368 0xc78, 0x0000001f,
1369 0xc7c, 0x00b91612,
1370 0xc80, 0x24000090,
1371 0xc84, 0x20f60000,
1372 0xc88, 0x24000090,
1373 0xc8c, 0x20200000,
1374 0xc90, 0x00121820,
1375 0xc94, 0x00000000,
1376 0xc98, 0x00121820,
1377 0xc9c, 0x00007f7f,
1378 0xca0, 0x00000000,
1379 0xca4, 0x00000080,
1380 0xca8, 0x00000000,
1381 0xcac, 0x00000000,
1382 0xcb0, 0x00000000,
1383 0xcb4, 0x00000000,
1384 0xcb8, 0x00000000,
1385 0xcbc, 0x28000000,
1386 0xcc0, 0x00000000,
1387 0xcc4, 0x00000000,
1388 0xcc8, 0x00000000,
1389 0xccc, 0x00000000,
1390 0xcd0, 0x00000000,
1391 0xcd4, 0x00000000,
1392 0xcd8, 0x64b22427,
1393 0xcdc, 0x00766932,
1394 0xce0, 0x00222222,
1395 0xce4, 0x00000000,
1396 0xce8, 0x37644302,
1397 0xcec, 0x2f97d40c,
1398 0xd00, 0x00080740,
1399 0xd04, 0x00020401,
1400 0xd08, 0x0000907f,
1401 0xd0c, 0x20010201,
1402 0xd10, 0xa0633333,
1403 0xd14, 0x3333bc43,
1404 0xd18, 0x7a8f5b6b,
1405 0xd2c, 0xcc979975,
1406 0xd30, 0x00000000,
1407 0xd34, 0x80608000,
1408 0xd38, 0x00000000,
1409 0xd3c, 0x00027293,
1410 0xd40, 0x00000000,
1411 0xd44, 0x00000000,
1412 0xd48, 0x00000000,
1413 0xd4c, 0x00000000,
1414 0xd50, 0x6437140a,
1415 0xd54, 0x00000000,
1416 0xd58, 0x00000000,
1417 0xd5c, 0x30032064,
1418 0xd60, 0x4653de68,
1419 0xd64, 0x04518a3c,
1420 0xd68, 0x00002101,
1421 0xd6c, 0x2a201c16,
1422 0xd70, 0x1812362e,
1423 0xd74, 0x322c2220,
1424 0xd78, 0x000e3c24,
1425 0xe00, 0x24242424,
1426 0xe04, 0x24242424,
1427 0xe08, 0x03902024,
1428 0xe10, 0x24242424,
1429 0xe14, 0x24242424,
1430 0xe18, 0x24242424,
1431 0xe1c, 0x24242424,
1432 0xe28, 0x00000000,
1433 0xe30, 0x1000dc1f,
1434 0xe34, 0x10008c1f,
1435 0xe38, 0x02140102,
1436 0xe3c, 0x681604c2,
1437 0xe40, 0x01007c00,
1438 0xe44, 0x01004800,
1439 0xe48, 0xfb000000,
1440 0xe4c, 0x000028d1,
1441 0xe50, 0x1000dc1f,
1442 0xe54, 0x10008c1f,
1443 0xe58, 0x02140102,
1444 0xe5c, 0x28160d05,
1445 0xe60, 0x00000008,
1446 0xe68, 0x001b25a4,
1447 0xe6c, 0x631b25a0,
1448 0xe70, 0x631b25a0,
1449 0xe74, 0x081b25a0,
1450 0xe78, 0x081b25a0,
1451 0xe7c, 0x081b25a0,
1452 0xe80, 0x081b25a0,
1453 0xe84, 0x631b25a0,
1454 0xe88, 0x081b25a0,
1455 0xe8c, 0x631b25a0,
1456 0xed0, 0x631b25a0,
1457 0xed4, 0x631b25a0,
1458 0xed8, 0x631b25a0,
1459 0xedc, 0x001b25a0,
1460 0xee0, 0x001b25a0,
1461 0xeec, 0x6b1b25a0,
1462 0xee8, 0x31555448,
1463 0xf14, 0x00000003,
1464 0xf4c, 0x00000000,
1465 0xf00, 0x00000300,
1466};
1467
1468u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = {
1469 0xe00, 0xffffffff, 0x06080808,
1470 0xe04, 0xffffffff, 0x00040406,
1471 0xe08, 0x0000ff00, 0x00000000,
1472 0x86c, 0xffffff00, 0x00000000,
1473 0xe10, 0xffffffff, 0x04060608,
1474 0xe14, 0xffffffff, 0x00020204,
1475 0xe18, 0xffffffff, 0x04060608,
1476 0xe1c, 0xffffffff, 0x00020204,
1477 0x830, 0xffffffff, 0x06080808,
1478 0x834, 0xffffffff, 0x00040406,
1479 0x838, 0xffffff00, 0x00000000,
1480 0x86c, 0x000000ff, 0x00000000,
1481 0x83c, 0xffffffff, 0x04060608,
1482 0x848, 0xffffffff, 0x00020204,
1483 0x84c, 0xffffffff, 0x04060608,
1484 0x868, 0xffffffff, 0x00020204,
1485 0xe00, 0xffffffff, 0x00000000,
1486 0xe04, 0xffffffff, 0x00000000,
1487 0xe08, 0x0000ff00, 0x00000000,
1488 0x86c, 0xffffff00, 0x00000000,
1489 0xe10, 0xffffffff, 0x00000000,
1490 0xe14, 0xffffffff, 0x00000000,
1491 0xe18, 0xffffffff, 0x00000000,
1492 0xe1c, 0xffffffff, 0x00000000,
1493 0x830, 0xffffffff, 0x00000000,
1494 0x834, 0xffffffff, 0x00000000,
1495 0x838, 0xffffff00, 0x00000000,
1496 0x86c, 0x000000ff, 0x00000000,
1497 0x83c, 0xffffffff, 0x00000000,
1498 0x848, 0xffffffff, 0x00000000,
1499 0x84c, 0xffffffff, 0x00000000,
1500 0x868, 0xffffffff, 0x00000000,
1501 0xe00, 0xffffffff, 0x00000000,
1502 0xe04, 0xffffffff, 0x00000000,
1503 0xe08, 0x0000ff00, 0x00000000,
1504 0x86c, 0xffffff00, 0x00000000,
1505 0xe10, 0xffffffff, 0x00000000,
1506 0xe14, 0xffffffff, 0x00000000,
1507 0xe18, 0xffffffff, 0x00000000,
1508 0xe1c, 0xffffffff, 0x00000000,
1509 0x830, 0xffffffff, 0x00000000,
1510 0x834, 0xffffffff, 0x00000000,
1511 0x838, 0xffffff00, 0x00000000,
1512 0x86c, 0x000000ff, 0x00000000,
1513 0x83c, 0xffffffff, 0x00000000,
1514 0x848, 0xffffffff, 0x00000000,
1515 0x84c, 0xffffffff, 0x00000000,
1516 0x868, 0xffffffff, 0x00000000,
1517 0xe00, 0xffffffff, 0x00000000,
1518 0xe04, 0xffffffff, 0x00000000,
1519 0xe08, 0x0000ff00, 0x00000000,
1520 0x86c, 0xffffff00, 0x00000000,
1521 0xe10, 0xffffffff, 0x00000000,
1522 0xe14, 0xffffffff, 0x00000000,
1523 0xe18, 0xffffffff, 0x00000000,
1524 0xe1c, 0xffffffff, 0x00000000,
1525 0x830, 0xffffffff, 0x00000000,
1526 0x834, 0xffffffff, 0x00000000,
1527 0x838, 0xffffff00, 0x00000000,
1528 0x86c, 0x000000ff, 0x00000000,
1529 0x83c, 0xffffffff, 0x00000000,
1530 0x848, 0xffffffff, 0x00000000,
1531 0x84c, 0xffffffff, 0x00000000,
1532 0x868, 0xffffffff, 0x00000000,
1533 0xe00, 0xffffffff, 0x00000000,
1534 0xe04, 0xffffffff, 0x00000000,
1535 0xe08, 0x0000ff00, 0x00000000,
1536 0x86c, 0xffffff00, 0x00000000,
1537 0xe10, 0xffffffff, 0x00000000,
1538 0xe14, 0xffffffff, 0x00000000,
1539 0xe18, 0xffffffff, 0x00000000,
1540 0xe1c, 0xffffffff, 0x00000000,
1541 0x830, 0xffffffff, 0x00000000,
1542 0x834, 0xffffffff, 0x00000000,
1543 0x838, 0xffffff00, 0x00000000,
1544 0x86c, 0x000000ff, 0x00000000,
1545 0x83c, 0xffffffff, 0x00000000,
1546 0x848, 0xffffffff, 0x00000000,
1547 0x84c, 0xffffffff, 0x00000000,
1548 0x868, 0xffffffff, 0x00000000,
1549 0xe00, 0xffffffff, 0x00000000,
1550 0xe04, 0xffffffff, 0x00000000,
1551 0xe08, 0x0000ff00, 0x00000000,
1552 0x86c, 0xffffff00, 0x00000000,
1553 0xe10, 0xffffffff, 0x00000000,
1554 0xe14, 0xffffffff, 0x00000000,
1555 0xe18, 0xffffffff, 0x00000000,
1556 0xe1c, 0xffffffff, 0x00000000,
1557 0x830, 0xffffffff, 0x00000000,
1558 0x834, 0xffffffff, 0x00000000,
1559 0x838, 0xffffff00, 0x00000000,
1560 0x86c, 0x000000ff, 0x00000000,
1561 0x83c, 0xffffffff, 0x00000000,
1562 0x848, 0xffffffff, 0x00000000,
1563 0x84c, 0xffffffff, 0x00000000,
1564 0x868, 0xffffffff, 0x00000000,
1565 0xe00, 0xffffffff, 0x00000000,
1566 0xe04, 0xffffffff, 0x00000000,
1567 0xe08, 0x0000ff00, 0x00000000,
1568 0x86c, 0xffffff00, 0x00000000,
1569 0xe10, 0xffffffff, 0x00000000,
1570 0xe14, 0xffffffff, 0x00000000,
1571 0xe18, 0xffffffff, 0x00000000,
1572 0xe1c, 0xffffffff, 0x00000000,
1573 0x830, 0xffffffff, 0x00000000,
1574 0x834, 0xffffffff, 0x00000000,
1575 0x838, 0xffffff00, 0x00000000,
1576 0x86c, 0x000000ff, 0x00000000,
1577 0x83c, 0xffffffff, 0x00000000,
1578 0x848, 0xffffffff, 0x00000000,
1579 0x84c, 0xffffffff, 0x00000000,
1580 0x868, 0xffffffff, 0x00000000,
1581};
1582
1583u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = {
1584 0x000, 0x00030159,
1585 0x001, 0x00031284,
1586 0x002, 0x00098000,
1587 0x003, 0x00018c63,
1588 0x004, 0x000210e7,
1589 0x009, 0x0002044f,
1590 0x00a, 0x0001adb0,
1591 0x00b, 0x00054867,
1592 0x00c, 0x0008992e,
1593 0x00d, 0x0000e529,
1594 0x00e, 0x00039ce7,
1595 0x00f, 0x00000451,
1596 0x019, 0x00000000,
1597 0x01a, 0x00000255,
1598 0x01b, 0x00060a00,
1599 0x01c, 0x000fc378,
1600 0x01d, 0x000a1250,
1601 0x01e, 0x0004445f,
1602 0x01f, 0x00080001,
1603 0x020, 0x0000b614,
1604 0x021, 0x0006c000,
1605 0x022, 0x0000083c,
1606 0x023, 0x00001558,
1607 0x024, 0x00000060,
1608 0x025, 0x00000483,
1609 0x026, 0x0004f000,
1610 0x027, 0x000ec7d9,
1611 0x028, 0x000977c0,
1612 0x029, 0x00004783,
1613 0x02a, 0x00000001,
1614 0x02b, 0x00021334,
1615 0x02a, 0x00000000,
1616 0x02b, 0x00000054,
1617 0x02a, 0x00000001,
1618 0x02b, 0x00000808,
1619 0x02b, 0x00053333,
1620 0x02c, 0x0000000c,
1621 0x02a, 0x00000002,
1622 0x02b, 0x00000808,
1623 0x02b, 0x0005b333,
1624 0x02c, 0x0000000d,
1625 0x02a, 0x00000003,
1626 0x02b, 0x00000808,
1627 0x02b, 0x00063333,
1628 0x02c, 0x0000000d,
1629 0x02a, 0x00000004,
1630 0x02b, 0x00000808,
1631 0x02b, 0x0006b333,
1632 0x02c, 0x0000000d,
1633 0x02a, 0x00000005,
1634 0x02b, 0x00000808,
1635 0x02b, 0x00073333,
1636 0x02c, 0x0000000d,
1637 0x02a, 0x00000006,
1638 0x02b, 0x00000709,
1639 0x02b, 0x0005b333,
1640 0x02c, 0x0000000d,
1641 0x02a, 0x00000007,
1642 0x02b, 0x00000709,
1643 0x02b, 0x00063333,
1644 0x02c, 0x0000000d,
1645 0x02a, 0x00000008,
1646 0x02b, 0x0000060a,
1647 0x02b, 0x0004b333,
1648 0x02c, 0x0000000d,
1649 0x02a, 0x00000009,
1650 0x02b, 0x0000060a,
1651 0x02b, 0x00053333,
1652 0x02c, 0x0000000d,
1653 0x02a, 0x0000000a,
1654 0x02b, 0x0000060a,
1655 0x02b, 0x0005b333,
1656 0x02c, 0x0000000d,
1657 0x02a, 0x0000000b,
1658 0x02b, 0x0000060a,
1659 0x02b, 0x00063333,
1660 0x02c, 0x0000000d,
1661 0x02a, 0x0000000c,
1662 0x02b, 0x0000060a,
1663 0x02b, 0x0006b333,
1664 0x02c, 0x0000000d,
1665 0x02a, 0x0000000d,
1666 0x02b, 0x0000060a,
1667 0x02b, 0x00073333,
1668 0x02c, 0x0000000d,
1669 0x02a, 0x0000000e,
1670 0x02b, 0x0000050b,
1671 0x02b, 0x00066666,
1672 0x02c, 0x0000001a,
1673 0x02a, 0x000e0000,
1674 0x010, 0x0004000f,
1675 0x011, 0x000e31fc,
1676 0x010, 0x0006000f,
1677 0x011, 0x000ff9f8,
1678 0x010, 0x0002000f,
1679 0x011, 0x000203f9,
1680 0x010, 0x0003000f,
1681 0x011, 0x000ff500,
1682 0x010, 0x00000000,
1683 0x011, 0x00000000,
1684 0x010, 0x0008000f,
1685 0x011, 0x0003f100,
1686 0x010, 0x0009000f,
1687 0x011, 0x00023100,
1688 0x012, 0x000d8000,
1689 0x012, 0x00090000,
1690 0x012, 0x00051000,
1691 0x012, 0x00012000,
1692 0x013, 0x00028fb4,
1693 0x013, 0x00024fa8,
1694 0x013, 0x000207a4,
1695 0x013, 0x0001c798,
1696 0x013, 0x000183a4,
1697 0x013, 0x00014398,
1698 0x013, 0x000101a4,
1699 0x013, 0x0000c198,
1700 0x013, 0x000080a4,
1701 0x013, 0x00004098,
1702 0x013, 0x00000000,
1703 0x014, 0x0001944c,
1704 0x014, 0x00059444,
1705 0x014, 0x0009944c,
1706 0x014, 0x000d9444,
1707 0x015, 0x0000f405,
1708 0x015, 0x0004f405,
1709 0x015, 0x0008f405,
1710 0x015, 0x000cf405,
1711 0x016, 0x000e0330,
1712 0x016, 0x000a0330,
1713 0x016, 0x00060330,
1714 0x016, 0x00020330,
1715 0x000, 0x00010159,
1716 0x018, 0x0000f401,
1717 0x0fe, 0x00000000,
1718 0x0fe, 0x00000000,
1719 0x01f, 0x00080003,
1720 0x0fe, 0x00000000,
1721 0x0fe, 0x00000000,
1722 0x01e, 0x00044457,
1723 0x01f, 0x00080000,
1724 0x000, 0x00030159,
1725};
1726
1727u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = {
1728 0xc78, 0x7b000001,
1729 0xc78, 0x7b010001,
1730 0xc78, 0x7b020001,
1731 0xc78, 0x7b030001,
1732 0xc78, 0x7b040001,
1733 0xc78, 0x7b050001,
1734 0xc78, 0x7b060001,
1735 0xc78, 0x7b070001,
1736 0xc78, 0x7b080001,
1737 0xc78, 0x7a090001,
1738 0xc78, 0x790a0001,
1739 0xc78, 0x780b0001,
1740 0xc78, 0x770c0001,
1741 0xc78, 0x760d0001,
1742 0xc78, 0x750e0001,
1743 0xc78, 0x740f0001,
1744 0xc78, 0x73100001,
1745 0xc78, 0x72110001,
1746 0xc78, 0x71120001,
1747 0xc78, 0x70130001,
1748 0xc78, 0x6f140001,
1749 0xc78, 0x6e150001,
1750 0xc78, 0x6d160001,
1751 0xc78, 0x6c170001,
1752 0xc78, 0x6b180001,
1753 0xc78, 0x6a190001,
1754 0xc78, 0x691a0001,
1755 0xc78, 0x681b0001,
1756 0xc78, 0x671c0001,
1757 0xc78, 0x661d0001,
1758 0xc78, 0x651e0001,
1759 0xc78, 0x641f0001,
1760 0xc78, 0x63200001,
1761 0xc78, 0x62210001,
1762 0xc78, 0x61220001,
1763 0xc78, 0x60230001,
1764 0xc78, 0x46240001,
1765 0xc78, 0x45250001,
1766 0xc78, 0x44260001,
1767 0xc78, 0x43270001,
1768 0xc78, 0x42280001,
1769 0xc78, 0x41290001,
1770 0xc78, 0x402a0001,
1771 0xc78, 0x262b0001,
1772 0xc78, 0x252c0001,
1773 0xc78, 0x242d0001,
1774 0xc78, 0x232e0001,
1775 0xc78, 0x222f0001,
1776 0xc78, 0x21300001,
1777 0xc78, 0x20310001,
1778 0xc78, 0x06320001,
1779 0xc78, 0x05330001,
1780 0xc78, 0x04340001,
1781 0xc78, 0x03350001,
1782 0xc78, 0x02360001,
1783 0xc78, 0x01370001,
1784 0xc78, 0x00380001,
1785 0xc78, 0x00390001,
1786 0xc78, 0x003a0001,
1787 0xc78, 0x003b0001,
1788 0xc78, 0x003c0001,
1789 0xc78, 0x003d0001,
1790 0xc78, 0x003e0001,
1791 0xc78, 0x003f0001,
1792 0xc78, 0x7b400001,
1793 0xc78, 0x7b410001,
1794 0xc78, 0x7b420001,
1795 0xc78, 0x7b430001,
1796 0xc78, 0x7b440001,
1797 0xc78, 0x7b450001,
1798 0xc78, 0x7b460001,
1799 0xc78, 0x7b470001,
1800 0xc78, 0x7b480001,
1801 0xc78, 0x7a490001,
1802 0xc78, 0x794a0001,
1803 0xc78, 0x784b0001,
1804 0xc78, 0x774c0001,
1805 0xc78, 0x764d0001,
1806 0xc78, 0x754e0001,
1807 0xc78, 0x744f0001,
1808 0xc78, 0x73500001,
1809 0xc78, 0x72510001,
1810 0xc78, 0x71520001,
1811 0xc78, 0x70530001,
1812 0xc78, 0x6f540001,
1813 0xc78, 0x6e550001,
1814 0xc78, 0x6d560001,
1815 0xc78, 0x6c570001,
1816 0xc78, 0x6b580001,
1817 0xc78, 0x6a590001,
1818 0xc78, 0x695a0001,
1819 0xc78, 0x685b0001,
1820 0xc78, 0x675c0001,
1821 0xc78, 0x665d0001,
1822 0xc78, 0x655e0001,
1823 0xc78, 0x645f0001,
1824 0xc78, 0x63600001,
1825 0xc78, 0x62610001,
1826 0xc78, 0x61620001,
1827 0xc78, 0x60630001,
1828 0xc78, 0x46640001,
1829 0xc78, 0x45650001,
1830 0xc78, 0x44660001,
1831 0xc78, 0x43670001,
1832 0xc78, 0x42680001,
1833 0xc78, 0x41690001,
1834 0xc78, 0x406a0001,
1835 0xc78, 0x266b0001,
1836 0xc78, 0x256c0001,
1837 0xc78, 0x246d0001,
1838 0xc78, 0x236e0001,
1839 0xc78, 0x226f0001,
1840 0xc78, 0x21700001,
1841 0xc78, 0x20710001,
1842 0xc78, 0x06720001,
1843 0xc78, 0x05730001,
1844 0xc78, 0x04740001,
1845 0xc78, 0x03750001,
1846 0xc78, 0x02760001,
1847 0xc78, 0x01770001,
1848 0xc78, 0x00780001,
1849 0xc78, 0x00790001,
1850 0xc78, 0x007a0001,
1851 0xc78, 0x007b0001,
1852 0xc78, 0x007c0001,
1853 0xc78, 0x007d0001,
1854 0xc78, 0x007e0001,
1855 0xc78, 0x007f0001,
1856 0xc78, 0x3800001e,
1857 0xc78, 0x3801001e,
1858 0xc78, 0x3802001e,
1859 0xc78, 0x3803001e,
1860 0xc78, 0x3804001e,
1861 0xc78, 0x3805001e,
1862 0xc78, 0x3806001e,
1863 0xc78, 0x3807001e,
1864 0xc78, 0x3808001e,
1865 0xc78, 0x3c09001e,
1866 0xc78, 0x3e0a001e,
1867 0xc78, 0x400b001e,
1868 0xc78, 0x440c001e,
1869 0xc78, 0x480d001e,
1870 0xc78, 0x4c0e001e,
1871 0xc78, 0x500f001e,
1872 0xc78, 0x5210001e,
1873 0xc78, 0x5611001e,
1874 0xc78, 0x5a12001e,
1875 0xc78, 0x5e13001e,
1876 0xc78, 0x6014001e,
1877 0xc78, 0x6015001e,
1878 0xc78, 0x6016001e,
1879 0xc78, 0x6217001e,
1880 0xc78, 0x6218001e,
1881 0xc78, 0x6219001e,
1882 0xc78, 0x621a001e,
1883 0xc78, 0x621b001e,
1884 0xc78, 0x621c001e,
1885 0xc78, 0x621d001e,
1886 0xc78, 0x621e001e,
1887 0xc78, 0x621f001e,
1888};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
new file mode 100644
index 00000000000..c3d5cd826cf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h
@@ -0,0 +1,71 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CU_TABLE__H_
31#define __RTL92CU_TABLE__H_
32
33#include <linux/types.h>
34
35#define RTL8192CUPHY_REG_2TARRAY_LENGTH 374
36extern u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH];
37#define RTL8192CUPHY_REG_1TARRAY_LENGTH 374
38extern u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH];
39
40#define RTL8192CUPHY_REG_ARRAY_PGLENGTH 336
41extern u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH];
42
43#define RTL8192CURADIOA_2TARRAYLENGTH 282
44extern u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH];
45#define RTL8192CURADIOB_2TARRAYLENGTH 78
46extern u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH];
47#define RTL8192CURADIOA_1TARRAYLENGTH 282
48extern u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH];
49#define RTL8192CURADIOB_1TARRAYLENGTH 1
50extern u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH];
51
52#define RTL8192CUMAC_2T_ARRAYLENGTH 172
53extern u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH];
54
55#define RTL8192CUAGCTAB_2TARRAYLENGTH 320
56extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH];
57#define RTL8192CUAGCTAB_1TARRAYLENGTH 320
58extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH];
59
60#define RTL8192CUPHY_REG_1T_HPArrayLength 378
61extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength];
62
63#define RTL8192CUPHY_REG_Array_PG_HPLength 336
64extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength];
65
66#define RTL8192CURadioA_1T_HPArrayLength 282
67extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength];
68#define RTL8192CUAGCTAB_1T_HPArrayLength 320
69extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength];
70
71#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
new file mode 100644
index 00000000000..9855c3e0a4b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -0,0 +1,684 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../usb.h"
32#include "../ps.h"
33#include "../base.h"
34#include "reg.h"
35#include "def.h"
36#include "phy.h"
37#include "rf.h"
38#include "dm.h"
39#include "mac.h"
40#include "trx.h"
41
42static int _ConfigVerTOutEP(struct ieee80211_hw *hw)
43{
44 u8 ep_cfg, txqsele;
45 u8 ep_nums = 0;
46
47 struct rtl_priv *rtlpriv = rtl_priv(hw);
48 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
49 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
50
51 rtlusb->out_queue_sel = 0;
52 ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL);
53 ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT;
54 switch (ep_cfg) {
55 case 0: /* 2 bulk OUT, 1 bulk IN */
56 case 3:
57 rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_LQ;
58 ep_nums = 2;
59 break;
60 case 1: /* 1 bulk IN/OUT => map all endpoints to Low queue */
61 case 2: /* 1 bulk IN, 1 bulk OUT => map all endpoints to High queue */
62 txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS);
63 if (txqsele & 0x0F) /* map all endpoints to High queue */
64 rtlusb->out_queue_sel = TX_SELE_HQ;
65 else if (txqsele & 0xF0) /* map all endpoints to Low queue */
66 rtlusb->out_queue_sel = TX_SELE_LQ;
67 ep_nums = 1;
68 break;
69 default:
70 break;
71 }
72 return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
73}
74
75static int _ConfigVerNOutEP(struct ieee80211_hw *hw)
76{
77 u8 ep_cfg;
78 u8 ep_nums = 0;
79
80 struct rtl_priv *rtlpriv = rtl_priv(hw);
81 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
82 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
83
84 rtlusb->out_queue_sel = 0;
85 /* Normal and High queue */
86 ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1));
87 if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
88 rtlusb->out_queue_sel |= TX_SELE_HQ;
89 ep_nums++;
90 }
91 if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) {
92 rtlusb->out_queue_sel |= TX_SELE_NQ;
93 ep_nums++;
94 }
95 /* Low queue */
96 ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2));
97 if (ep_cfg & USB_NORMAL_SIE_EP_MASK) {
98 rtlusb->out_queue_sel |= TX_SELE_LQ;
99 ep_nums++;
100 }
101 return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL;
102}
103
104static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB,
105 bool bwificfg, struct rtl_ep_map *ep_map)
106{
107 struct rtl_priv *rtlpriv = rtl_priv(hw);
108
109 if (bwificfg) { /* for WMM */
110 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
111 ("USB Chip-B & WMM Setting.....\n"));
112 ep_map->ep_mapping[RTL_TXQ_BE] = 2;
113 ep_map->ep_mapping[RTL_TXQ_BK] = 3;
114 ep_map->ep_mapping[RTL_TXQ_VI] = 3;
115 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
116 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
117 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
118 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
119 } else { /* typical setting */
120 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
121 ("USB typical Setting.....\n"));
122 ep_map->ep_mapping[RTL_TXQ_BE] = 3;
123 ep_map->ep_mapping[RTL_TXQ_BK] = 3;
124 ep_map->ep_mapping[RTL_TXQ_VI] = 2;
125 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
126 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
127 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
128 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
129 }
130}
131
132static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg,
133 struct rtl_ep_map *ep_map)
134{
135 struct rtl_priv *rtlpriv = rtl_priv(hw);
136 if (bwificfg) { /* for WMM */
137 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
138 ("USB 3EP Setting for WMM.....\n"));
139 ep_map->ep_mapping[RTL_TXQ_BE] = 5;
140 ep_map->ep_mapping[RTL_TXQ_BK] = 3;
141 ep_map->ep_mapping[RTL_TXQ_VI] = 3;
142 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
143 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
144 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
145 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
146 } else { /* typical setting */
147 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
148 ("USB 3EP Setting for typical.....\n"));
149 ep_map->ep_mapping[RTL_TXQ_BE] = 5;
150 ep_map->ep_mapping[RTL_TXQ_BK] = 5;
151 ep_map->ep_mapping[RTL_TXQ_VI] = 3;
152 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
153 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
154 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
155 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
156 }
157}
158
159static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
160{
161 ep_map->ep_mapping[RTL_TXQ_BE] = 2;
162 ep_map->ep_mapping[RTL_TXQ_BK] = 2;
163 ep_map->ep_mapping[RTL_TXQ_VI] = 2;
164 ep_map->ep_mapping[RTL_TXQ_VO] = 2;
165 ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
166 ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
167 ep_map->ep_mapping[RTL_TXQ_HI] = 2;
168}
169static int _out_ep_mapping(struct ieee80211_hw *hw)
170{
171 int err = 0;
172 bool bIsChipN, bwificfg = false;
173 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
174 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
175 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
176 struct rtl_ep_map *ep_map = &(rtlusb->ep_map);
177
178 bIsChipN = IS_NORMAL_CHIP(rtlhal->version);
179 switch (rtlusb->out_ep_nums) {
180 case 2:
181 _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map);
182 break;
183 case 3:
184 /* Test chip doesn't support three out EPs. */
185 if (!bIsChipN) {
186 err = -EINVAL;
187 goto err_out;
188 }
189 _ThreeOutEpMapping(hw, bIsChipN, ep_map);
190 break;
191 case 1:
192 _OneOutEpMapping(hw, ep_map);
193 break;
194 default:
195 err = -EINVAL;
196 break;
197 }
198err_out:
199 return err;
200
201}
202/* endpoint mapping */
203int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw)
204{
205 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
206 int error = 0;
207 if (likely(IS_NORMAL_CHIP(rtlhal->version)))
208 error = _ConfigVerNOutEP(hw);
209 else
210 error = _ConfigVerTOutEP(hw);
211 if (error)
212 goto err_out;
213 error = _out_ep_mapping(hw);
214 if (error)
215 goto err_out;
216err_out:
217 return error;
218}
219
220u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
221{
222 u16 hw_queue_index;
223
224 if (unlikely(ieee80211_is_beacon(fc))) {
225 hw_queue_index = RTL_TXQ_BCN;
226 goto out;
227 }
228 if (ieee80211_is_mgmt(fc)) {
229 hw_queue_index = RTL_TXQ_MGT;
230 goto out;
231 }
232 switch (mac80211_queue_index) {
233 case 0:
234 hw_queue_index = RTL_TXQ_VO;
235 break;
236 case 1:
237 hw_queue_index = RTL_TXQ_VI;
238 break;
239 case 2:
240 hw_queue_index = RTL_TXQ_BE;
241 break;
242 case 3:
243 hw_queue_index = RTL_TXQ_BK;
244 break;
245 default:
246 hw_queue_index = RTL_TXQ_BE;
247 RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
248 mac80211_queue_index));
249 break;
250 }
251out:
252 return hw_queue_index;
253}
254
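/*
 * A sketch only (not used by the driver, names are illustrative): the
 * switch in rtl8192cu_mq_to_hwq() above amounts to this lookup table,
 * indexed by the mac80211 queue number 0..3. The driver keeps the switch
 * form so it can RT_ASSERT on unexpected queue indices.
 */
static const u16 mq_to_hwq_sketch[4] __attribute__((unused)) = {
	RTL_TXQ_VO,	/* 0: voice */
	RTL_TXQ_VI,	/* 1: video */
	RTL_TXQ_BE,	/* 2: best effort */
	RTL_TXQ_BK,	/* 3: background */
};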
255static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
256 __le16 fc, u16 mac80211_queue_index)
257{
258 enum rtl_desc_qsel qsel;
259 struct rtl_priv *rtlpriv = rtl_priv(hw);
260
261 if (unlikely(ieee80211_is_beacon(fc))) {
262 qsel = QSLT_BEACON;
263 goto out;
264 }
265 if (ieee80211_is_mgmt(fc)) {
266 qsel = QSLT_MGNT;
267 goto out;
268 }
269 switch (mac80211_queue_index) {
270 case 0: /* VO */
271 qsel = QSLT_VO;
272 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
273 ("VO queue, set qsel = 0x%x\n", QSLT_VO));
274 break;
275 case 1: /* VI */
276 qsel = QSLT_VI;
277 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
278 ("VI queue, set qsel = 0x%x\n", QSLT_VI));
279 break;
280 case 3: /* BK */
281 qsel = QSLT_BK;
282 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
283 ("BK queue, set qsel = 0x%x\n", QSLT_BK));
284 break;
285 case 2: /* BE */
286 default:
287 qsel = QSLT_BE;
288 RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
289 ("BE queue, set qsel = 0x%x\n", QSLT_BE));
290 break;
291 }
292out:
293 return qsel;
294}
295
296/* =============================================================== */
297
298/*----------------------------------------------------------------------
299 *
300 * Rx handler
301 *
302 *---------------------------------------------------------------------- */
303bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
304 struct rtl_stats *stats,
305 struct ieee80211_rx_status *rx_status,
306 u8 *p_desc, struct sk_buff *skb)
307{
308 struct rx_fwinfo_92c *p_drvinfo;
309 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
310 u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
311
312 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
313 stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) *
314 RX_DRV_INFO_SIZE_UNIT;
315 stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
316 stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
317 stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
318 stats->hwerror = (stats->crc | stats->icv);
319 stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
320 stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc);
321 stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
323 stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
324 && (GET_RX_DESC_FAGGR(pdesc) == 1));
325 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
326 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
327 rx_status->freq = hw->conf.channel->center_freq;
328 rx_status->band = hw->conf.channel->band;
329 if (GET_RX_DESC_CRC32(pdesc))
330 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
331 if (!GET_RX_DESC_SWDEC(pdesc))
332 rx_status->flag |= RX_FLAG_DECRYPTED;
333 if (GET_RX_DESC_BW(pdesc))
334 rx_status->flag |= RX_FLAG_40MHZ;
335 if (GET_RX_DESC_RX_HT(pdesc))
336 rx_status->flag |= RX_FLAG_HT;
337 rx_status->flag |= RX_FLAG_TSFT;
338 if (stats->decrypted)
339 rx_status->flag |= RX_FLAG_DECRYPTED;
340 rx_status->rate_idx = _rtl92c_rate_mapping(hw,
341 (bool)GET_RX_DESC_RX_HT(pdesc),
342 (u8)GET_RX_DESC_RX_MCS(pdesc),
343 (bool)GET_RX_DESC_PAGGR(pdesc));
344 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
345 if (phystatus) {
346 p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
347 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
348 p_drvinfo);
349 }
350 /*rx_status->qual = stats->signal; */
351 rx_status->signal = stats->rssi + 10;
352 /*rx_status->noise = -stats->noise; */
353 return true;
354}
355
356#define RTL_RX_DRV_INFO_UNIT 8
357
358static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
359{
360 struct ieee80211_rx_status *rx_status =
361 (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb);
362 u32 skb_len, pkt_len, drvinfo_len;
363 struct rtl_priv *rtlpriv = rtl_priv(hw);
364 u8 *rxdesc;
365 struct rtl_stats stats = {
366 .signal = 0,
367 .noise = -98,
368 .rate = 0,
369 };
370 struct rx_fwinfo_92c *p_drvinfo;
371 bool bv;
372 __le16 fc;
373 struct ieee80211_hdr *hdr;
374
375 memset(rx_status, 0, sizeof(*rx_status));
376 rxdesc = skb->data;
377 skb_len = skb->len;
378 drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT);
379 pkt_len = GET_RX_DESC_PKT_LEN(rxdesc);
380 /* TODO: Error recovery. drop this skb or something. */
381 WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len));
382 stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc);
383 stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) *
384 RX_DRV_INFO_SIZE_UNIT;
385 stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03);
386 stats.icv = (u16) GET_RX_DESC_ICV(rxdesc);
387 stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc);
388 stats.hwerror = (stats.crc | stats.icv);
389 stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc);
390 stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc);
391 stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc);
392 stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1)
393 && (GET_RX_DESC_FAGGR(rxdesc) == 1));
394 stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc);
395 stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc);
396 /* TODO: is center_freq changed when doing scan? */
397 /* TODO: Shall we add protection or just skip those two steps? */
398 rx_status->freq = hw->conf.channel->center_freq;
399 rx_status->band = hw->conf.channel->band;
400 if (GET_RX_DESC_CRC32(rxdesc))
401 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
402 if (!GET_RX_DESC_SWDEC(rxdesc))
403 rx_status->flag |= RX_FLAG_DECRYPTED;
404 if (GET_RX_DESC_BW(rxdesc))
405 rx_status->flag |= RX_FLAG_40MHZ;
406 if (GET_RX_DESC_RX_HT(rxdesc))
407 rx_status->flag |= RX_FLAG_HT;
408 /* Data rate */
409 rx_status->rate_idx = _rtl92c_rate_mapping(hw,
410 (bool)GET_RX_DESC_RX_HT(rxdesc),
411 (u8)GET_RX_DESC_RX_MCS(rxdesc),
412 (bool)GET_RX_DESC_PAGGR(rxdesc)
413 );
414 /* There is a phy status after this rx descriptor. */
415 if (GET_RX_DESC_PHY_STATUS(rxdesc)) {
416 p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE);
417 rtl92c_translate_rx_signal_stuff(hw, skb, &stats,
418 (struct rx_desc_92c *)rxdesc, p_drvinfo);
419 }
420 skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE));
421 hdr = (struct ieee80211_hdr *)(skb->data);
422 fc = hdr->frame_control;
423 bv = ieee80211_is_probe_resp(fc);
424 if (bv)
425 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
426 ("Got probe response frame.\n"));
427 if (ieee80211_is_beacon(fc))
428 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
429 ("Got beacon frame.\n"));
430 if (ieee80211_is_data(fc))
431 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n"));
432 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
433 ("Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:"
434 "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1],
435 (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4],
436 (u32)hdr->addr1[5]));
438 ieee80211_rx_irqsafe(hw, skb);
439}
440
441void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb)
442{
443 _rtl_rx_process(hw, skb);
444}
445
446void rtl8192c_rx_segregate_hdl(
447 struct ieee80211_hw *hw,
448 struct sk_buff *skb,
449 struct sk_buff_head *skb_list)
450{
451}
452
453/*----------------------------------------------------------------------
454 *
455 * Tx handler
456 *
457 *---------------------------------------------------------------------- */
458void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb)
459{
460}
461
462int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
463 struct sk_buff *skb)
464{
465 return 0;
466}
467
468struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw,
469 struct sk_buff_head *list)
470{
471 return skb_dequeue(list);
472}
473
474/*======================================== trx ===============================*/
475
476static void _rtl_fill_usb_tx_desc(u8 *txdesc)
477{
478 SET_TX_DESC_OWN(txdesc, 1);
479 SET_TX_DESC_LAST_SEG(txdesc, 1);
480 SET_TX_DESC_FIRST_SEG(txdesc, 1);
481}
482/*
483 * Compute the TX descriptor checksum used by the HW for error recovery.
484 */
485static void _rtl_tx_desc_checksum(u8 *txdesc)
486{
487 u16 *ptr = (u16 *)txdesc;
488 u16 checksum = 0;
489 u32 index;
490
491 /* Clear first */
492 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
493 for (index = 0; index < 16; index++)
494 checksum = checksum ^ (*(ptr + index));
495 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
496}
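/*
 * A minimal standalone sketch of the checksum above (names here are
 * illustrative, not part of the driver): the descriptor checksum is just
 * the XOR of the first sixteen 16-bit words of the 32-byte descriptor
 * header, computed after the checksum field itself has been cleared.
 */
static inline u16 txdesc_checksum_sketch(const u16 *desc_hdr)
{
	u16 checksum = 0;
	int i;

	for (i = 0; i < 16; i++)	/* 16 half-words = 32 bytes */
		checksum ^= desc_hdr[i];
	return checksum;
}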
497
498void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
499 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
500 struct ieee80211_tx_info *info, struct sk_buff *skb,
501 unsigned int queue_index)
502{
503 struct rtl_priv *rtlpriv = rtl_priv(hw);
504 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
505 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
506 bool defaultadapter = true;
507 struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid);
508 struct rtl_tcb_desc tcb_desc;
509 u8 *qc = ieee80211_get_qos_ctl(hdr);
510 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
511 u16 seq_number;
512 __le16 fc = hdr->frame_control;
513 u8 rate_flag = info->control.rates[0].flags;
514 u16 pktlen = skb->len;
515 enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc,
516 skb_get_queue_mapping(skb));
517 u8 *txdesc;
518
519 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
520 rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
521 txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE);
522 memset(txdesc, 0, RTL_TX_HEADER_SIZE);
523 SET_TX_DESC_PKT_SIZE(txdesc, pktlen);
524 SET_TX_DESC_LINIP(txdesc, 0);
525 SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET);
526 SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE);
527 SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate);
528 if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
529 SET_TX_DESC_DATA_SHORTGI(txdesc, 1);
530 if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
531 info->flags & IEEE80211_TX_CTL_AMPDU) {
532 SET_TX_DESC_AGG_ENABLE(txdesc, 1);
533 SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14);
534 } else {
535 SET_TX_DESC_AGG_BREAK(txdesc, 1);
536 }
537 SET_TX_DESC_SEQ(txdesc, seq_number);
538 SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable &&
539 !tcb_desc.cts_enable) ? 1 : 0));
540 SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable ||
541 tcb_desc.cts_enable) ? 1 : 0));
542 SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0));
543 SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0));
544 SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate);
545 SET_TX_DESC_RTS_BW(txdesc, 0);
546 SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc);
547 SET_TX_DESC_RTS_SHORT(txdesc,
548 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
549 (tcb_desc.rts_use_shortpreamble ? 1 : 0)
550 : (tcb_desc.rts_use_shortgi ? 1 : 0)));
551 if (mac->bw_40) {
552 if (tcb_desc.packet_bw) {
553 SET_TX_DESC_DATA_BW(txdesc, 1);
554 SET_TX_DESC_DATA_SC(txdesc, 3);
555 } else {
556 SET_TX_DESC_DATA_BW(txdesc, 0);
557 if (rate_flag & IEEE80211_TX_RC_DUP_DATA)
558 SET_TX_DESC_DATA_SC(txdesc,
559 mac->cur_40_prime_sc);
560 }
561 } else {
562 SET_TX_DESC_DATA_BW(txdesc, 0);
563 SET_TX_DESC_DATA_SC(txdesc, 0);
564 }
565 if (sta) {
566 u8 ampdu_density = sta->ht_cap.ampdu_density;
567 SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density);
568 }
569 if (info->control.hw_key) {
570 struct ieee80211_key_conf *keyconf = info->control.hw_key;
571 switch (keyconf->cipher) {
572 case WLAN_CIPHER_SUITE_WEP40:
573 case WLAN_CIPHER_SUITE_WEP104:
574 case WLAN_CIPHER_SUITE_TKIP:
575 SET_TX_DESC_SEC_TYPE(txdesc, 0x1);
576 break;
577 case WLAN_CIPHER_SUITE_CCMP:
578 SET_TX_DESC_SEC_TYPE(txdesc, 0x3);
579 break;
580 default:
581 SET_TX_DESC_SEC_TYPE(txdesc, 0x0);
582 break;
583 }
584 }
585 SET_TX_DESC_PKT_ID(txdesc, 0);
586 SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel);
587 SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F);
588 SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF);
589 SET_TX_DESC_DISABLE_FB(txdesc, 0);
590 SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 1 : 0);
591 if (ieee80211_is_data_qos(fc)) {
592 if (mac->rdg_en) {
593 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
594 ("Enable RDG function.\n"));
595 SET_TX_DESC_RDG_ENABLE(txdesc, 1);
596 SET_TX_DESC_HTC(txdesc, 1);
597 }
598 }
599 if (rtlpriv->dm.useramask) {
600 SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index);
601 SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id);
602 } else {
603 SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index);
604 SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index);
605 }
606 if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps &&
607 ppsc->fwctrl_lps) {
608 SET_TX_DESC_HWSEQ_EN(txdesc, 1);
609 SET_TX_DESC_PKT_ID(txdesc, 8);
610 if (!defaultadapter)
611 SET_TX_DESC_QOS(txdesc, 1);
612 }
613 if (ieee80211_has_morefrags(fc))
614 SET_TX_DESC_MORE_FRAG(txdesc, 1);
615 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
616 is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
617 SET_TX_DESC_BMC(txdesc, 1);
618 _rtl_fill_usb_tx_desc(txdesc);
619 _rtl_tx_desc_checksum(txdesc);
620 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__));
621}
622
623void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
624 u32 buffer_len, bool bIsPsPoll)
625{
626 /* Clear all status */
627 memset(pDesc, 0, RTL_TX_HEADER_SIZE);
628 SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */
629 SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */
630 SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */
631 SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */
632 SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
633 /* Set NAVUSEHDR to prevent the PS-Poll AID field from being
634 * changed to a wrong value by the HW. */
635 if (bIsPsPoll) {
636 SET_TX_DESC_NAV_USE_HDR(pDesc, 1);
637 } else {
638 SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */
639 SET_TX_DESC_PKT_ID(pDesc, 0x8); /* set bit3 to 1. */
640 }
641 SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */
642 SET_TX_DESC_OWN(pDesc, 1);
643 SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M);
644 _rtl_tx_desc_checksum(pDesc);
645}
646
647void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
648 u8 *pdesc, bool firstseg,
649 bool lastseg, struct sk_buff *skb)
650{
651 struct rtl_priv *rtlpriv = rtl_priv(hw);
652 u8 fw_queue = QSLT_BEACON;
653 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
654 __le16 fc = hdr->frame_control;
655
656 memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE);
657 if (firstseg)
658 SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE);
659 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
660 SET_TX_DESC_SEQ(pdesc, 0);
661 SET_TX_DESC_LINIP(pdesc, 0);
662 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
663 SET_TX_DESC_FIRST_SEG(pdesc, 1);
664 SET_TX_DESC_LAST_SEG(pdesc, 1);
665 SET_TX_DESC_RATE_ID(pdesc, 7);
666 SET_TX_DESC_MACID(pdesc, 0);
667 SET_TX_DESC_OWN(pdesc, 1);
668 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
669 SET_TX_DESC_FIRST_SEG(pdesc, 1);
670 SET_TX_DESC_LAST_SEG(pdesc, 1);
671 SET_TX_DESC_OFFSET(pdesc, 0x20);
672 SET_TX_DESC_USE_RATE(pdesc, 1);
673 if (!ieee80211_is_data_qos(fc)) {
674 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
675 SET_TX_DESC_PKT_ID(pdesc, 8);
676 }
677 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n",
678 pdesc, RTL_TX_DESC_SIZE);
679}
680
681bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
682{
683 return true;
684}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
new file mode 100644
index 00000000000..b396d46edbb
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
@@ -0,0 +1,430 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CU_TRX_H__
31#define __RTL92CU_TRX_H__
32
33#define RTL92C_USB_BULK_IN_NUM 1
34#define RTL92C_NUM_RX_URBS 8
35#define RTL92C_NUM_TX_URBS 32
36
37#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
38#define RX_DRV_INFO_SIZE_UNIT 8
39
40enum usb_rx_agg_mode {
41 USB_RX_AGG_DISABLE,
42 USB_RX_AGG_DMA,
43 USB_RX_AGG_USB,
44 USB_RX_AGG_DMA_USB
45};
46
47#define TX_SELE_HQ BIT(0) /* High Queue */
48#define TX_SELE_LQ BIT(1) /* Low Queue */
49#define TX_SELE_NQ BIT(2) /* Normal Queue */
50
51#define RTL_USB_TX_AGG_NUM_DESC 5
52
53#define RTL_USB_RX_AGG_PAGE_NUM 4
54#define RTL_USB_RX_AGG_PAGE_TIMEOUT 3
55
56#define RTL_USB_RX_AGG_BLOCK_NUM 5
57#define RTL_USB_RX_AGG_BLOCK_TIMEOUT 3
58
59/*======================== rx status =========================================*/
60
61struct rx_drv_info_92c {
62 /*
63 * Driver info contains the PHY status and other variable-size info.
64 * The PHY status content is laid out below.
65 */
66
67 /* DWORD 0 */
68 u8 gain_trsw[4];
69
70 /* DWORD 1 */
71 u8 pwdb_all;
72 u8 cfosho[4];
73
74 /* DWORD 2 */
75 u8 cfotail[4];
76
77 /* DWORD 3 */
78 s8 rxevm[2];
79 s8 rxsnr[4];
80
81 /* DWORD 4 */
82 u8 pdsnr[2];
83
84 /* DWORD 5 */
85 u8 csi_current[2];
86 u8 csi_target[2];
87
88 /* DWORD 6 */
89 u8 sigevm;
90 u8 max_ex_pwr;
91 u8 ex_intf_flag:1;
92 u8 sgi_en:1;
93 u8 rxsc:2;
94 u8 reserve:4;
95} __packed;
96
97/* Define a macro that takes a le32 word, converts it to host ordering,
98 * right shifts by a specified count, creates a mask of the specified
99 * bit count, and extracts that number of bits.
100 */
101
102#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits) \
103 ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
104 BIT_LEN_MASK_32(__bits))
105
106/* Define a macro that clears a bit field in an le32 word and
107 * sets the specified value into that bit field. The resulting
108 * value remains in le32 ordering; however, it is properly converted
109 * to host ordering for the clear and set operations before conversion
110 * back to le32.
111 */
112
113#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
114 (*(__le32 *)(__pdesc) = \
115 (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
116 (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
117 (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
118
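/*
 * A minimal usage sketch of the two accessors above (buffers are
 * hypothetical): read the 14-bit packet length from DWORD 0 of an rx
 * descriptor and set the OWN bit of a tx descriptor, just as the GET and
 * SET macros below are built.
 */
static inline void le_accessor_sketch(u8 *rxdesc, u8 *txdesc)
{
	u32 pkt_len = SHIFT_AND_MASK_LE(rxdesc, 0, 14);	/* bits 0..13 */

	SET_BITS_OFFSET_LE(txdesc, 31, 1, 1);		/* OWN bit */
	(void)pkt_len;
}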
119/* macros to read various fields in RX descriptor */
120
121/* DWORD 0 */
122#define GET_RX_DESC_PKT_LEN(__rxdesc) \
123 SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
124#define GET_RX_DESC_CRC32(__rxdesc) \
125 SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
126#define GET_RX_DESC_ICV(__rxdesc) \
127 SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
128#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \
129 SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
130#define GET_RX_DESC_SECURITY(__rxdesc) \
131 SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
132#define GET_RX_DESC_QOS(__rxdesc) \
133 SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
134#define GET_RX_DESC_SHIFT(__rxdesc) \
135 SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
136#define GET_RX_DESC_PHY_STATUS(__rxdesc) \
137 SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
138#define GET_RX_DESC_SWDEC(__rxdesc) \
139 SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
140#define GET_RX_DESC_LAST_SEG(__rxdesc) \
141 SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
142#define GET_RX_DESC_FIRST_SEG(__rxdesc) \
143 SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
144#define GET_RX_DESC_EOR(__rxdesc) \
145 SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
146#define GET_RX_DESC_OWN(__rxdesc) \
147 SHIFT_AND_MASK_LE(__rxdesc, 31, 1)
148
149/* DWORD 1 */
150#define GET_RX_DESC_MACID(__rxdesc) \
151 SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
152#define GET_RX_DESC_TID(__rxdesc) \
153 SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
154#define GET_RX_DESC_PAGGR(__rxdesc) \
155 SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
156#define GET_RX_DESC_FAGGR(__rxdesc) \
157 SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
158#define GET_RX_DESC_A1_FIT(__rxdesc) \
159 SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
160#define GET_RX_DESC_A2_FIT(__rxdesc) \
161 SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
162#define GET_RX_DESC_PAM(__rxdesc) \
163 SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
164#define GET_RX_DESC_PWR(__rxdesc) \
165 SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
166#define GET_RX_DESC_MORE_DATA(__rxdesc) \
167 SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
168#define GET_RX_DESC_MORE_FRAG(__rxdesc) \
169 SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
170#define GET_RX_DESC_TYPE(__rxdesc) \
171 SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
172#define GET_RX_DESC_MC(__rxdesc) \
173 SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
174#define GET_RX_DESC_BC(__rxdesc) \
175 SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)
176
177/* DWORD 2 */
178#define GET_RX_DESC_SEQ(__rxdesc) \
179 SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
180#define GET_RX_DESC_FRAG(__rxdesc) \
181 SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
182#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc) \
183 SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
184#define GET_RX_DESC_NEXT_IND(__rxdesc) \
185 SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)
186
187/* DWORD 3 */
188#define GET_RX_DESC_RX_MCS(__rxdesc) \
189 SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
190#define GET_RX_DESC_RX_HT(__rxdesc) \
191 SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
192#define GET_RX_DESC_AMSDU(__rxdesc) \
193 SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
194#define GET_RX_DESC_SPLCP(__rxdesc) \
195 SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
196#define GET_RX_DESC_BW(__rxdesc) \
197 SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
198#define GET_RX_DESC_HTC(__rxdesc) \
199 SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
200#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc) \
201 SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
202#define GET_RX_DESC_IP_CHK_RPT(__rxdesc) \
203 SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
204#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc) \
205 SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
206#define GET_RX_DESC_HWPC_ERR(__rxdesc) \
207 SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
208#define GET_RX_DESC_HWPC_IND(__rxdesc) \
209 SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
210#define GET_RX_DESC_IV0(__rxdesc) \
211 SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)
212
213/* DWORD 4 */
214#define GET_RX_DESC_IV1(__rxdesc) \
215 SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)
216
217/* DWORD 5 */
218#define GET_RX_DESC_TSFL(__rxdesc) \
219 SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
220
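/*
 * A sketch (helper names are illustrative) of how the rx path in trx.c
 * walks a received buffer using the getters above: the driver info block
 * (struct rx_drv_info_92c, present when the PHY status bit is set) sits
 * right after the fixed-size rx descriptor, its length is reported in
 * RX_DRV_INFO_SIZE_UNIT (8-byte) units, and the 802.11 frame follows it.
 * RTL_RX_DESC_SIZE is assumed from the driver headers.
 */
static inline struct rx_drv_info_92c *rx_drv_info_sketch(u8 *rxdesc)
{
	return (struct rx_drv_info_92c *)(rxdesc + RTL_RX_DESC_SIZE);
}

static inline u8 *rx_frame_sketch(u8 *rxdesc)
{
	return rxdesc + RTL_RX_DESC_SIZE +
	       GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RX_DRV_INFO_SIZE_UNIT;
}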
221/*======================= tx desc ============================================*/
222
223/* macros to set various fields in TX descriptor */
224
225/* Dword 0 */
226#define SET_TX_DESC_PKT_SIZE(__txdesc, __value) \
227 SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
228#define SET_TX_DESC_OFFSET(__txdesc, __value) \
229 SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
230#define SET_TX_DESC_BMC(__txdesc, __value) \
231 SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
232#define SET_TX_DESC_HTC(__txdesc, __value) \
233 SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
234#define SET_TX_DESC_LAST_SEG(__txdesc, __value) \
235 SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
236#define SET_TX_DESC_FIRST_SEG(__txdesc, __value) \
237 SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
238#define SET_TX_DESC_LINIP(__txdesc, __value) \
239 SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
240#define SET_TX_DESC_NO_ACM(__txdesc, __value) \
241 SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
242#define SET_TX_DESC_GF(__txdesc, __value) \
243 SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
244#define SET_TX_DESC_OWN(__txdesc, __value) \
245 SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)
246
247
248/* Dword 1 */
249#define SET_TX_DESC_MACID(__txdesc, __value) \
250 SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
251#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value) \
252 SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
253#define SET_TX_DESC_AGG_BREAK(__txdesc, __value) \
254 SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
255#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value) \
256 SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
257#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value) \
258 SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
259#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value) \
260 SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
261#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value) \
262 SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
263#define SET_TX_DESC_PIFS(__txdesc, __value) \
264 SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
265#define SET_TX_DESC_RATE_ID(__txdesc, __value) \
266 SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
267#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value) \
268 SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
269#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value) \
270 SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
271#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value) \
272 SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
273#define SET_TX_DESC_SEC_TYPE(__txdesc, __value) \
274 SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
275#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value) \
276 SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)
277
278/* Dword 2 */
279#define SET_TX_DESC_RTS_RC(__txdesc, __value) \
280 SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
281#define SET_TX_DESC_DATA_RC(__txdesc, __value) \
282 SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
283#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value) \
284 SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
285#define SET_TX_DESC_MORE_FRAG(__txdesc, __value) \
286 SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
287#define SET_TX_DESC_RAW(__txdesc, __value) \
288 SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
289#define SET_TX_DESC_CCX(__txdesc, __value) \
290 SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
291#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value) \
292 SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
293#define SET_TX_DESC_ANTSEL_A(__txdesc, __value) \
294 SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
295#define SET_TX_DESC_ANTSEL_B(__txdesc, __value) \
296 SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
297#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value) \
298 SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
299#define SET_TX_DESC_TX_ANTL(__txdesc, __value) \
300 SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
301#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value) \
302 SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)
303
304/* Dword 3 */
305#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value) \
306 SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
307#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value) \
308 SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
309#define SET_TX_DESC_SEQ(__txdesc, __value) \
310 SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
311#define SET_TX_DESC_PKT_ID(__txdesc, __value) \
312 SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)
313
314/* Dword 4 */
315#define SET_TX_DESC_RTS_RATE(__txdesc, __value) \
316 SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
317#define SET_TX_DESC_AP_DCFE(__txdesc, __value) \
318 SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
319#define SET_TX_DESC_QOS(__txdesc, __value) \
320 SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
321#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value) \
322 SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
323#define SET_TX_DESC_USE_RATE(__txdesc, __value) \
324 SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
325#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value) \
326 SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
327#define SET_TX_DESC_DISABLE_FB(__txdesc, __value) \
328 SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
329#define SET_TX_DESC_CTS2SELF(__txdesc, __value) \
330 SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
331#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value) \
332 SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
333#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value) \
334 SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
335#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value) \
336 SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
337#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value) \
338 SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
339#define SET_TX_DESC_DATA_SC(__txdesc, __value) \
340 SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
341#define SET_TX_DESC_DATA_STBC(__txdesc, __value) \
342 SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
343#define SET_TX_DESC_DATA_SHORT(__txdesc, __value) \
344 SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
345#define SET_TX_DESC_DATA_BW(__txdesc, __value) \
346 SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
347#define SET_TX_DESC_RTS_SHORT(__txdesc, __value) \
348 SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
349#define SET_TX_DESC_RTS_BW(__txdesc, __value) \
350 SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
351#define SET_TX_DESC_RTS_SC(__txdesc, __value) \
352 SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
353#define SET_TX_DESC_RTS_STBC(__txdesc, __value) \
354 SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)
355
356/* Dword 5 */
357#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
358 SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
359#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
360 SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
361#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
362 SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
363#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
364 SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
365#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
366 SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
367#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
368 SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
369#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value) \
370 SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
371#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value) \
372 SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)
373
374/* Dword 6 */
375#define SET_TX_DESC_TXAGC_A(__txdesc, __value) \
376 SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
377#define SET_TX_DESC_TXAGC_B(__txdesc, __value) \
378 SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
379#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value) \
380 SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
381#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value) \
382 SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
383#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value) \
384 SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
385#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value) \
386 SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
387#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value) \
388 SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
389#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value) \
390 SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)
391
392/* Dword 7 */
393#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
394 SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
395#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value) \
396 SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
397#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value) \
398 SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
399#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value) \
400 SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
401#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \
402 SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
403
404
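Editor's note: a complementary sketch on the transmit side. The driver's real descriptor fill routine is rtl92cu_tx_fill_desc(), declared just below; this fragment only illustrates how the SET_TX_DESC_* writers above compose a descriptor. The helper name and the field choices are invented, and it assumes the usual kernel memset() is available.

static void example_fill_tx_desc(u8 *txdesc, u16 pktlen, u8 hw_queue, u16 seq)
{
	memset(txdesc, 0, 32);			/* the eight dwords covered by the macros above */
	SET_TX_DESC_PKT_SIZE(txdesc, pktlen);	/* dword 0, bits 0..15 */
	SET_TX_DESC_FIRST_SEG(txdesc, 1);	/* single-segment frame */
	SET_TX_DESC_LAST_SEG(txdesc, 1);
	SET_TX_DESC_OWN(txdesc, 1);		/* mark the descriptor valid for the hardware */
	SET_TX_DESC_QUEUE_SEL(txdesc, hw_queue);/* dword 1, bits 8..12 */
	SET_TX_DESC_SEQ(txdesc, seq);		/* dword 3, bits 16..27 */
	SET_TX_DESC_TX_RATE(txdesc, 0);		/* dword 5: fixed lowest rate */
}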
405int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
406u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
407bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
408 struct rtl_stats *stats,
409 struct ieee80211_rx_status *rx_status,
410 u8 *p_desc, struct sk_buff *skb);
411void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb);
412void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *,
413 struct sk_buff_head *);
414void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
415int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
416 struct sk_buff *skb);
417struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
418 struct sk_buff_head *);
419void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
420 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
421 struct ieee80211_tx_info *info, struct sk_buff *skb,
422 unsigned int queue_index);
423void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc,
424 u32 buffer_len, bool bIsPsPoll);
425void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
426 u8 *pdesc, bool b_firstseg,
427 bool b_lastseg, struct sk_buff *skb);
428bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
429
430#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
new file mode 100644
index 00000000000..a4b2613d6a8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -0,0 +1,1035 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27#include <linux/usb.h>
28#include "core.h"
29#include "wifi.h"
30#include "usb.h"
31#include "base.h"
32#include "ps.h"
33
34#define REALTEK_USB_VENQT_READ 0xC0
35#define REALTEK_USB_VENQT_WRITE 0x40
36#define REALTEK_USB_VENQT_CMD_REQ 0x05
37#define REALTEK_USB_VENQT_CMD_IDX 0x00
38
39#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
40
41static void usbctrl_async_callback(struct urb *urb)
42{
43 if (urb)
44 kfree(urb->context);
45}
46
47static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
48 u16 value, u16 index, void *pdata,
49 u16 len)
50{
51 int rc;
52 unsigned int pipe;
53 u8 reqtype;
54 struct usb_ctrlrequest *dr;
55 struct urb *urb;
56 struct rtl819x_async_write_data {
57 u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
58 struct usb_ctrlrequest dr;
59 } *buf;
60
61 pipe = usb_sndctrlpipe(udev, 0); /* write_out */
62 reqtype = REALTEK_USB_VENQT_WRITE;
63
64 buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
65 if (!buf)
66 return -ENOMEM;
67
68 urb = usb_alloc_urb(0, GFP_ATOMIC);
69 if (!urb) {
70 kfree(buf);
71 return -ENOMEM;
72 }
73
74 dr = &buf->dr;
75
76 dr->bRequestType = reqtype;
77 dr->bRequest = request;
78 dr->wValue = cpu_to_le16(value);
79 dr->wIndex = cpu_to_le16(index);
80 dr->wLength = cpu_to_le16(len);
81 memcpy(buf, pdata, len);
82 usb_fill_control_urb(urb, udev, pipe,
83 (unsigned char *)dr, buf, len,
84 usbctrl_async_callback, buf);
85 rc = usb_submit_urb(urb, GFP_ATOMIC);
86 if (rc < 0)
87 kfree(buf);
88 usb_free_urb(urb);
89 return rc;
90}
91
92static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
93 u16 value, u16 index, void *pdata,
94 u16 len)
95{
96 unsigned int pipe;
97 int status;
98 u8 reqtype;
99
100 pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
101 reqtype = REALTEK_USB_VENQT_READ;
102
103 status = usb_control_msg(udev, pipe, request, reqtype, value, index,
 104				 pdata, len, 0); /* 0: no timeout, wait indefinitely */
105
106 if (status < 0)
107 printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! "
108 "status:0x%x value=0x%x\n", value, status,
109 *(u32 *)pdata);
110 return status;
111}
112
113static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
114{
115 u8 request;
116 u16 wvalue;
117 u16 index;
118 u32 *data;
119 u32 ret;
120
121 data = kmalloc(sizeof(u32), GFP_KERNEL);
122 if (!data)
123 return -ENOMEM;
124 request = REALTEK_USB_VENQT_CMD_REQ;
125 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
126
127 wvalue = (u16)addr;
128 _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
129 ret = *data;
130 kfree(data);
131 return ret;
132}
133
134static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
135{
136 struct device *dev = rtlpriv->io.dev;
137
138 return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
139}
140
141static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
142{
143 struct device *dev = rtlpriv->io.dev;
144
145 return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
146}
147
148static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
149{
150 struct device *dev = rtlpriv->io.dev;
151
152 return _usb_read_sync(to_usb_device(dev), addr, 4);
153}
154
155static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
156 u16 len)
157{
158 u8 request;
159 u16 wvalue;
160 u16 index;
161 u32 data;
162
163 request = REALTEK_USB_VENQT_CMD_REQ;
164 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
165 wvalue = (u16)(addr&0x0000ffff);
166 data = val;
167 _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
168 len);
169}
170
171static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
172{
173 struct device *dev = rtlpriv->io.dev;
174
175 _usb_write_async(to_usb_device(dev), addr, val, 1);
176}
177
178static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
179{
180 struct device *dev = rtlpriv->io.dev;
181
182 _usb_write_async(to_usb_device(dev), addr, val, 2);
183}
184
185static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
186{
187 struct device *dev = rtlpriv->io.dev;
188
189 _usb_write_async(to_usb_device(dev), addr, val, 4);
190}
191
192static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr,
193 u16 len, u8 *pdata)
194{
195 int status;
196 u8 request;
197 u16 wvalue;
198 u16 index;
199
200 request = REALTEK_USB_VENQT_CMD_REQ;
201 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
202 wvalue = (u16)addr;
203 if (read)
204 status = _usbctrl_vendorreq_sync_read(udev, request, wvalue,
205 index, pdata, len);
206 else
207 status = _usbctrl_vendorreq_async_write(udev, request, wvalue,
208 index, pdata, len);
209 return status;
210}
211
212static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len,
213 u8 *pdata)
214{
215 struct device *dev = rtlpriv->io.dev;
216
217 return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len,
218 pdata);
219}
220
221static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len,
222 u8 *pdata)
223{
224 struct device *dev = rtlpriv->io.dev;
225
226 return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len,
227 pdata);
228}
229
230static void _rtl_usb_io_handler_init(struct device *dev,
231 struct ieee80211_hw *hw)
232{
233 struct rtl_priv *rtlpriv = rtl_priv(hw);
234
235 rtlpriv->io.dev = dev;
236 mutex_init(&rtlpriv->io.bb_mutex);
237 rtlpriv->io.write8_async = _usb_write8_async;
238 rtlpriv->io.write16_async = _usb_write16_async;
239 rtlpriv->io.write32_async = _usb_write32_async;
240 rtlpriv->io.writeN_async = _usb_writeN_async;
241 rtlpriv->io.read8_sync = _usb_read8_sync;
242 rtlpriv->io.read16_sync = _usb_read16_sync;
243 rtlpriv->io.read32_sync = _usb_read32_sync;
244 rtlpriv->io.readN_sync = _usb_readN_sync;
245}
246
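Editor's note: once _rtl_usb_io_handler_init() has installed these callbacks, the rest of rtlwifi reaches the chip's registers through rtlpriv->io without knowing the bus type. A hedged sketch of that usage; example_touch_registers() and the 0x0100 address are placeholders, not real register definitions.

static void example_touch_registers(struct rtl_priv *rtlpriv)
{
	/* synchronous read: ends up in _usb_read8_sync(), i.e. a blocking
	 * vendor control transfer on endpoint 0 */
	u8 val = rtlpriv->io.read8_sync(rtlpriv, 0x0100 /* placeholder */);

	/* asynchronous write: _usb_write8_async() submits a control URB
	 * with GFP_ATOMIC and returns without waiting for completion */
	rtlpriv->io.write8_async(rtlpriv, 0x0100 /* placeholder */, val | 0x01);
}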
247static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
248{
249 struct rtl_priv *rtlpriv = rtl_priv(hw);
250
251 mutex_destroy(&rtlpriv->io.bb_mutex);
252}
253
 254/*
 255 * Default aggregation handler.
 256 * Do nothing and just return the oldest skb.
 257 */
258static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw,
259 struct sk_buff_head *list)
260{
261 return skb_dequeue(list);
262}
263
264#define IS_HIGH_SPEED_USB(udev) \
265 ((USB_SPEED_HIGH == (udev)->speed) ? true : false)
266
267static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
268{
269 u32 i;
270 struct rtl_priv *rtlpriv = rtl_priv(hw);
271 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
272
273 rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev)
274 ? USB_HIGH_SPEED_BULK_SIZE
275 : USB_FULL_SPEED_BULK_SIZE;
276
277 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n",
278 rtlusb->max_bulk_out_size));
279
280 for (i = 0; i < __RTL_TXQ_NUM; i++) {
281 u32 ep_num = rtlusb->ep_map.ep_mapping[i];
282 if (!ep_num) {
283 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
284 ("Invalid endpoint map setting!\n"));
285 return -EINVAL;
286 }
287 }
288
289 rtlusb->usb_tx_post_hdl =
290 rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl;
291 rtlusb->usb_tx_cleanup =
292 rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup;
293 rtlusb->usb_tx_aggregate_hdl =
294 (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl)
295 ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl
296 : &_none_usb_tx_aggregate_hdl;
297
298 init_usb_anchor(&rtlusb->tx_submitted);
299 for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
300 skb_queue_head_init(&rtlusb->tx_skb_queue[i]);
301 init_usb_anchor(&rtlusb->tx_pending[i]);
302 }
303 return 0;
304}
305
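Editor's note: _rtl_usb_init_tx() above refuses to run if any slot of ep_map.ep_mapping[] is still zero, so the chip's endpoint-mapping callback (rtl8192cu_endpoint_mapping() in this patch) must have filled every rtl_txq entry first. Below is a sketch of what such a callback might look like for a part with four bulk-out endpoints; the function name and the endpoint numbers are invented for illustration.

static int example_endpoint_mapping(struct ieee80211_hw *hw)
{
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
	u32 *map = rtlusb->ep_map.ep_mapping;

	map[RTL_TXQ_VO]  = 4;	/* voice        -> bulk-out EP 4 (made up) */
	map[RTL_TXQ_VI]  = 5;	/* video        -> bulk-out EP 5 */
	map[RTL_TXQ_BE]  = 6;	/* best effort  -> bulk-out EP 6 */
	map[RTL_TXQ_BK]  = 7;	/* background   -> bulk-out EP 7 */
	map[RTL_TXQ_BCN] = 4;	/* beacon/mgmt/high-prio share an EP here */
	map[RTL_TXQ_MGT] = 4;
	map[RTL_TXQ_HI]  = 4;
	return 0;
}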
306static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
307{
308 struct rtl_priv *rtlpriv = rtl_priv(hw);
309 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
310 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
311
312 rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
313 rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
314 rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
315 rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
316 rtlusb->usb_rx_segregate_hdl =
317 rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
318
319 printk(KERN_INFO "rtl8192cu: rx_max_size %d, rx_urb_num %d, in_ep %d\n",
320 rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
321 init_usb_anchor(&rtlusb->rx_submitted);
322 return 0;
323}
324
325static int _rtl_usb_init(struct ieee80211_hw *hw)
326{
327 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
329 struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
330 int err;
331 u8 epidx;
332 struct usb_interface *usb_intf = rtlusb->intf;
333 u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints;
334
335 rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0;
336 for (epidx = 0; epidx < epnums; epidx++) {
337 struct usb_endpoint_descriptor *pep_desc;
338 pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
339
340 if (usb_endpoint_dir_in(pep_desc))
341 rtlusb->in_ep_nums++;
342 else if (usb_endpoint_dir_out(pep_desc))
343 rtlusb->out_ep_nums++;
344
345 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
346 ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n",
347 pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
348 pep_desc->bInterval));
349 }
350 if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num)
 351		return -EINVAL;
352
353 /* usb endpoint mapping */
354 err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
355 rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
356 _rtl_usb_init_tx(hw);
357 _rtl_usb_init_rx(hw);
358 return err;
359}
360
361static int _rtl_usb_init_sw(struct ieee80211_hw *hw)
362{
363 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
364 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
365 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
366 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
367
368 rtlhal->hw = hw;
369 ppsc->inactiveps = false;
370 ppsc->leisure_ps = false;
371 ppsc->fwctrl_lps = false;
372 ppsc->reg_fwctrl_lps = 3;
373 ppsc->reg_max_lps_awakeintvl = 5;
374 ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
375
376 /* IBSS */
377 mac->beacon_interval = 100;
378
379 /* AMPDU */
380 mac->min_space_cfg = 0;
381 mac->max_mss_density = 0;
382
383 /* set sane AMPDU defaults */
384 mac->current_ampdu_density = 7;
385 mac->current_ampdu_factor = 3;
386
387 /* QOS */
388 rtlusb->acm_method = eAcmWay2_SW;
389
390 /* IRQ */
391 /* HIMR - turn all on */
392 rtlusb->irq_mask[0] = 0xFFFFFFFF;
393 /* HIMR_EX - turn all on */
394 rtlusb->irq_mask[1] = 0xFFFFFFFF;
395 rtlusb->disableHWSM = true;
396 return 0;
397}
398
399#define __RADIO_TAP_SIZE_RSV 32
400
401static void _rtl_rx_completed(struct urb *urb);
402
403static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw,
404 struct rtl_usb *rtlusb,
405 struct urb *urb,
406 gfp_t gfp_mask)
407{
408 struct sk_buff *skb;
409 struct rtl_priv *rtlpriv = rtl_priv(hw);
410
411 skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV),
412 gfp_mask);
413 if (!skb) {
414 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
 415			 ("Failed to __dev_alloc_skb!!\n"));
416 return ERR_PTR(-ENOMEM);
417 }
418
419 /* reserve some space for mac80211's radiotap */
420 skb_reserve(skb, __RADIO_TAP_SIZE_RSV);
421 usb_fill_bulk_urb(urb, rtlusb->udev,
422 usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep),
423 skb->data, min(skb_tailroom(skb),
424 (int)rtlusb->rx_max_size),
425 _rtl_rx_completed, skb);
426
427 _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);
428 return skb;
429}
430
431#undef __RADIO_TAP_SIZE_RSV
432
433static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
434 struct sk_buff *skb)
435{
436 struct rtl_priv *rtlpriv = rtl_priv(hw);
437 u8 *rxdesc = skb->data;
438 struct ieee80211_hdr *hdr;
439 bool unicast = false;
440 __le16 fc;
441 struct ieee80211_rx_status rx_status = {0};
442 struct rtl_stats stats = {
443 .signal = 0,
444 .noise = -98,
445 .rate = 0,
446 };
447
448 skb_pull(skb, RTL_RX_DESC_SIZE);
449 rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
450 skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
451 hdr = (struct ieee80211_hdr *)(skb->data);
452 fc = hdr->frame_control;
453 if (!stats.crc) {
454 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
455
456 if (is_broadcast_ether_addr(hdr->addr1)) {
457 /*TODO*/;
458 } else if (is_multicast_ether_addr(hdr->addr1)) {
459 /*TODO*/
460 } else {
461 unicast = true;
462 rtlpriv->stats.rxbytesunicast += skb->len;
463 }
464
465 rtl_is_special_data(hw, skb, false);
466
467 if (ieee80211_is_data(fc)) {
468 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
469
470 if (unicast)
471 rtlpriv->link_info.num_rx_inperiod++;
472 }
473 }
474}
475
476static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
477 struct sk_buff *skb)
478{
479 struct rtl_priv *rtlpriv = rtl_priv(hw);
480 u8 *rxdesc = skb->data;
481 struct ieee80211_hdr *hdr;
482 bool unicast = false;
483 __le16 fc;
484 struct ieee80211_rx_status rx_status = {0};
485 struct rtl_stats stats = {
486 .signal = 0,
487 .noise = -98,
488 .rate = 0,
489 };
490
491 skb_pull(skb, RTL_RX_DESC_SIZE);
492 rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
493 skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
494 hdr = (struct ieee80211_hdr *)(skb->data);
495 fc = hdr->frame_control;
496 if (!stats.crc) {
497 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
498
499 if (is_broadcast_ether_addr(hdr->addr1)) {
500 /*TODO*/;
501 } else if (is_multicast_ether_addr(hdr->addr1)) {
502 /*TODO*/
503 } else {
504 unicast = true;
505 rtlpriv->stats.rxbytesunicast += skb->len;
506 }
507
508 rtl_is_special_data(hw, skb, false);
509
510 if (ieee80211_is_data(fc)) {
511 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
512
513 if (unicast)
514 rtlpriv->link_info.num_rx_inperiod++;
515 }
516 if (likely(rtl_action_proc(hw, skb, false))) {
517 struct sk_buff *uskb = NULL;
518 u8 *pdata;
519
520 uskb = dev_alloc_skb(skb->len + 128);
521 memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
522 sizeof(rx_status));
523 pdata = (u8 *)skb_put(uskb, skb->len);
524 memcpy(pdata, skb->data, skb->len);
525 dev_kfree_skb_any(skb);
526 ieee80211_rx_irqsafe(hw, uskb);
527 } else {
528 dev_kfree_skb_any(skb);
529 }
530 }
531}
532
533static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
534{
535 struct sk_buff *_skb;
536 struct sk_buff_head rx_queue;
537 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
538
539 skb_queue_head_init(&rx_queue);
540 if (rtlusb->usb_rx_segregate_hdl)
541 rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
542 WARN_ON(skb_queue_empty(&rx_queue));
543 while (!skb_queue_empty(&rx_queue)) {
544 _skb = skb_dequeue(&rx_queue);
 545		_rtl_usb_rx_process_agg(hw, _skb);
 546		ieee80211_rx_irqsafe(hw, _skb);
547 }
548}
549
550static void _rtl_rx_completed(struct urb *_urb)
551{
552 struct sk_buff *skb = (struct sk_buff *)_urb->context;
553 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
554 struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
555 struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
556 struct rtl_priv *rtlpriv = rtl_priv(hw);
557 int err = 0;
558
559 if (unlikely(IS_USB_STOP(rtlusb)))
560 goto free;
561
562 if (likely(0 == _urb->status)) {
 563		/* If this processing were moved to a work queue, would CPU
 564		 * utilization improve?  Note: a fresh skb is allocated below
 565		 * for the next transfer and the filled one is passed up.
 566		 */
567 skb_put(skb, _urb->actual_length);
568
569 if (likely(!rtlusb->usb_rx_segregate_hdl)) {
570 struct sk_buff *_skb;
571 _rtl_usb_rx_process_noagg(hw, skb);
572 _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC);
573 if (IS_ERR(_skb)) {
574 err = PTR_ERR(_skb);
575 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
576 ("Can't allocate skb for bulk IN!\n"));
577 return;
578 }
579 skb = _skb;
 580		} else {
581 /* TO DO */
582 _rtl_rx_pre_process(hw, skb);
583 printk(KERN_ERR "rtlwifi: rx agg not supported\n");
584 }
585 goto resubmit;
586 }
587
588 switch (_urb->status) {
589 /* disconnect */
590 case -ENOENT:
591 case -ECONNRESET:
592 case -ENODEV:
593 case -ESHUTDOWN:
594 goto free;
595 default:
596 break;
597 }
598
599resubmit:
600 skb_reset_tail_pointer(skb);
601 skb_trim(skb, 0);
602
603 usb_anchor_urb(_urb, &rtlusb->rx_submitted);
604 err = usb_submit_urb(_urb, GFP_ATOMIC);
605 if (unlikely(err)) {
606 usb_unanchor_urb(_urb);
607 goto free;
608 }
609 return;
610
611free:
612 dev_kfree_skb_irq(skb);
613}
614
615static int _rtl_usb_receive(struct ieee80211_hw *hw)
616{
617 struct urb *urb;
618 struct sk_buff *skb;
619 int err;
620 int i;
621 struct rtl_priv *rtlpriv = rtl_priv(hw);
622 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
623
624 WARN_ON(0 == rtlusb->rx_urb_num);
625 /* 1600 == 1514 + max WLAN header + rtk info */
626 WARN_ON(rtlusb->rx_max_size < 1600);
627
628 for (i = 0; i < rtlusb->rx_urb_num; i++) {
629 err = -ENOMEM;
630 urb = usb_alloc_urb(0, GFP_KERNEL);
631 if (!urb) {
632 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
 633				 ("Failed to alloc URB!!\n"));
634 goto err_out;
635 }
636
637 skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
638 if (IS_ERR(skb)) {
639 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
 640				 ("Failed to prep_rx_urb!!\n"));
641 err = PTR_ERR(skb);
642 goto err_out;
643 }
644
645 usb_anchor_urb(urb, &rtlusb->rx_submitted);
646 err = usb_submit_urb(urb, GFP_KERNEL);
647 if (err)
648 goto err_out;
649 usb_free_urb(urb);
650 }
651 return 0;
652
653err_out:
654 usb_kill_anchored_urbs(&rtlusb->rx_submitted);
655 return err;
656}
657
658static int rtl_usb_start(struct ieee80211_hw *hw)
659{
660 int err;
661 struct rtl_priv *rtlpriv = rtl_priv(hw);
662 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
663 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
664
665 err = rtlpriv->cfg->ops->hw_init(hw);
666 rtl_init_rx_config(hw);
667
668 /* Enable software */
669 SET_USB_START(rtlusb);
 670	/* should be called after adapter start and interrupt enable. */
671 set_hal_start(rtlhal);
672
673 /* Start bulk IN */
674 _rtl_usb_receive(hw);
675
676 return err;
677}
682
683/*======================= tx =========================================*/
684static void rtl_usb_cleanup(struct ieee80211_hw *hw)
685{
686 u32 i;
687 struct sk_buff *_skb;
688 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
689 struct ieee80211_tx_info *txinfo;
690
691 SET_USB_STOP(rtlusb);
692
693 /* clean up rx stuff. */
694 usb_kill_anchored_urbs(&rtlusb->rx_submitted);
695
696 /* clean up tx stuff */
697 for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
698 while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
699 rtlusb->usb_tx_cleanup(hw, _skb);
700 txinfo = IEEE80211_SKB_CB(_skb);
701 ieee80211_tx_info_clear_status(txinfo);
702 txinfo->flags |= IEEE80211_TX_STAT_ACK;
703 ieee80211_tx_status_irqsafe(hw, _skb);
704 }
705 usb_kill_anchored_urbs(&rtlusb->tx_pending[i]);
706 }
707 usb_kill_anchored_urbs(&rtlusb->tx_submitted);
708}
709
 710/*
 711 * We may add more members to struct rtl_usb later; do the deinit here.
 712 */
715static void rtl_usb_deinit(struct ieee80211_hw *hw)
716{
717 rtl_usb_cleanup(hw);
718}
719
720static void rtl_usb_stop(struct ieee80211_hw *hw)
721{
722 struct rtl_priv *rtlpriv = rtl_priv(hw);
723 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
724 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
725
 726	/* should be called after adapter start and interrupt enable. */
 727	set_hal_stop(rtlhal);
 728	/* Disable software */
 729	SET_USB_STOP(rtlusb);
730 rtl_usb_deinit(hw);
731 rtlpriv->cfg->ops->hw_disable(hw);
732}
733
734static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
735{
736 int err;
737 struct rtl_priv *rtlpriv = rtl_priv(hw);
738 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
739
740 usb_anchor_urb(_urb, &rtlusb->tx_submitted);
741 err = usb_submit_urb(_urb, GFP_ATOMIC);
742 if (err < 0) {
743 struct sk_buff *skb;
744
745 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
746 ("Failed to submit urb.\n"));
747 usb_unanchor_urb(_urb);
748 skb = (struct sk_buff *)_urb->context;
749 kfree_skb(skb);
750 }
751 usb_free_urb(_urb);
752}
753
754static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
755 struct sk_buff *skb)
756{
757 struct rtl_priv *rtlpriv = rtl_priv(hw);
758 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
759 struct ieee80211_tx_info *txinfo;
760
761 rtlusb->usb_tx_post_hdl(hw, urb, skb);
762 skb_pull(skb, RTL_TX_HEADER_SIZE);
763 txinfo = IEEE80211_SKB_CB(skb);
764 ieee80211_tx_info_clear_status(txinfo);
765 txinfo->flags |= IEEE80211_TX_STAT_ACK;
766
767 if (urb->status) {
768 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
769 ("Urb has error status 0x%X\n", urb->status));
770 goto out;
771 }
772 /* TODO: statistics */
773out:
774 ieee80211_tx_status_irqsafe(hw, skb);
775 return urb->status;
776}
777
778static void _rtl_tx_complete(struct urb *urb)
779{
780 struct sk_buff *skb = (struct sk_buff *)urb->context;
781 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
782 struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0];
783 struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
784 int err;
785
786 if (unlikely(IS_USB_STOP(rtlusb)))
787 return;
788 err = _usb_tx_post(hw, urb, skb);
789 if (err) {
 790		/* Ignore error and keep issuing other urbs */
791 return;
792 }
793}
794
795static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
796 struct sk_buff *skb, u32 ep_num)
797{
798 struct rtl_priv *rtlpriv = rtl_priv(hw);
799 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
800 struct urb *_urb;
801
802 WARN_ON(NULL == skb);
803 _urb = usb_alloc_urb(0, GFP_ATOMIC);
804 if (!_urb) {
805 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
806 ("Can't allocate URB for bulk out!\n"));
807 kfree_skb(skb);
808 return NULL;
809 }
810 _rtl_install_trx_info(rtlusb, skb, ep_num);
811 usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev,
812 ep_num), skb->data, skb->len, _rtl_tx_complete, skb);
813 _urb->transfer_flags |= URB_ZERO_PACKET;
814 return _urb;
815}
816
817static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
818 enum rtl_txq qnum)
819{
820 struct rtl_priv *rtlpriv = rtl_priv(hw);
821 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
822 u32 ep_num;
823 struct urb *_urb = NULL;
824 struct sk_buff *_skb = NULL;
825 struct sk_buff_head *skb_list;
826 struct usb_anchor *urb_list;
827
828 WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
829 if (unlikely(IS_USB_STOP(rtlusb))) {
830 RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
831 ("USB device is stopping...\n"));
832 kfree_skb(skb);
833 return;
834 }
835 ep_num = rtlusb->ep_map.ep_mapping[qnum];
836 skb_list = &rtlusb->tx_skb_queue[ep_num];
837 _skb = skb;
838 _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
839 if (unlikely(!_urb)) {
840 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
841 ("Can't allocate urb. Drop skb!\n"));
842 return;
843 }
844 urb_list = &rtlusb->tx_pending[ep_num];
845 _rtl_submit_tx_urb(hw, _urb);
846}
847
848static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb,
849 u16 hw_queue)
850{
851 struct rtl_priv *rtlpriv = rtl_priv(hw);
852 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
853 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
854 struct rtl_tx_desc *pdesc = NULL;
855 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
856 __le16 fc = hdr->frame_control;
857 u8 *pda_addr = hdr->addr1;
858 /* ssn */
859 u8 *qc = NULL;
860 u8 tid = 0;
861 u16 seq_number = 0;
862
863 if (ieee80211_is_mgmt(fc))
864 rtl_tx_mgmt_proc(hw, skb);
865 rtl_action_proc(hw, skb, true);
866 if (is_multicast_ether_addr(pda_addr))
867 rtlpriv->stats.txbytesmulticast += skb->len;
868 else if (is_broadcast_ether_addr(pda_addr))
869 rtlpriv->stats.txbytesbroadcast += skb->len;
870 else
871 rtlpriv->stats.txbytesunicast += skb->len;
872 if (ieee80211_is_data_qos(fc)) {
873 qc = ieee80211_get_qos_ctl(hdr);
874 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
875 seq_number = (le16_to_cpu(hdr->seq_ctrl) &
876 IEEE80211_SCTL_SEQ) >> 4;
877 seq_number += 1;
878 seq_number <<= 4;
879 }
880 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb,
881 hw_queue);
882 if (!ieee80211_has_morefrags(hdr->frame_control)) {
883 if (qc)
884 mac->tids[tid].seq_number = seq_number;
885 }
886 if (ieee80211_is_data(fc))
887 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
888}
889
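Editor's note: to make the sequence-number bookkeeping in _rtl_usb_tx_preprocess() concrete, suppose a QoS data frame reaches it with seq_ctrl == 0x01B5. Masking with IEEE80211_SCTL_SEQ (0xFFF0) gives 0x01B0, the right shift by four yields sequence number 27, the increment makes it 28, and the left shift stores 0x01C0. The value kept in mac->tids[tid].seq_number therefore stays in the same bit position the sequence number occupies in the on-air Sequence Control field, with the low four fragment-number bits zeroed.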
890static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
891{
892 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
893 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
894 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
895 __le16 fc = hdr->frame_control;
896 u16 hw_queue;
897
898 if (unlikely(is_hal_stop(rtlhal)))
899 goto err_free;
900 hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb));
901 _rtl_usb_tx_preprocess(hw, skb, hw_queue);
902 _rtl_usb_transmit(hw, skb, hw_queue);
903 return NETDEV_TX_OK;
904
905err_free:
906 dev_kfree_skb_any(skb);
907 return NETDEV_TX_OK;
908}
909
910static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
911 struct sk_buff *skb)
912{
913 return false;
914}
915
916static struct rtl_intf_ops rtl_usb_ops = {
917 .adapter_start = rtl_usb_start,
918 .adapter_stop = rtl_usb_stop,
919 .adapter_tx = rtl_usb_tx,
920 .waitq_insert = rtl_usb_tx_chk_waitq_insert,
921};
922
923int __devinit rtl_usb_probe(struct usb_interface *intf,
924 const struct usb_device_id *id)
925{
926 int err;
927 struct ieee80211_hw *hw = NULL;
928 struct rtl_priv *rtlpriv = NULL;
929 struct usb_device *udev;
930 struct rtl_usb_priv *usb_priv;
931
932 hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
933 sizeof(struct rtl_usb_priv), &rtl_ops);
934 if (!hw) {
935 RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__));
936 return -ENOMEM;
937 }
938 rtlpriv = hw->priv;
939 SET_IEEE80211_DEV(hw, &intf->dev);
940 udev = interface_to_usbdev(intf);
941 usb_get_dev(udev);
942 usb_priv = rtl_usbpriv(hw);
943 memset(usb_priv, 0, sizeof(*usb_priv));
944 usb_priv->dev.intf = intf;
945 usb_priv->dev.udev = udev;
946 usb_set_intfdata(intf, hw);
947 /* init cfg & intf_ops */
948 rtlpriv->rtlhal.interface = INTF_USB;
949 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
950 rtlpriv->intf_ops = &rtl_usb_ops;
951 rtl_dbgp_flag_init(hw);
952 /* Init IO handler */
953 _rtl_usb_io_handler_init(&udev->dev, hw);
954 rtlpriv->cfg->ops->read_chip_version(hw);
955 /*like read eeprom and so on */
956 rtlpriv->cfg->ops->read_eeprom_info(hw);
957 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
958 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
959 ("Can't init_sw_vars.\n"));
960 goto error_out;
961 }
962 rtlpriv->cfg->ops->init_sw_leds(hw);
963 err = _rtl_usb_init(hw);
964 err = _rtl_usb_init_sw(hw);
965 /* Init mac80211 sw */
966 err = rtl_init_core(hw);
967 if (err) {
968 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
969 ("Can't allocate sw for mac80211.\n"));
970 goto error_out;
971 }
972
973 /*init rfkill */
974 /* rtl_init_rfkill(hw); */
975
976 err = ieee80211_register_hw(hw);
977 if (err) {
978 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
979 ("Can't register mac80211 hw.\n"));
980 goto error_out;
981 } else {
982 rtlpriv->mac80211.mac80211_registered = 1;
983 }
984 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
985 return 0;
986error_out:
987 rtl_deinit_core(hw);
988 _rtl_usb_io_handler_release(hw);
989 ieee80211_free_hw(hw);
990 usb_put_dev(udev);
991 return -ENODEV;
992}
993EXPORT_SYMBOL(rtl_usb_probe);
994
995void rtl_usb_disconnect(struct usb_interface *intf)
996{
997 struct ieee80211_hw *hw = usb_get_intfdata(intf);
998 struct rtl_priv *rtlpriv = rtl_priv(hw);
999 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
1000 struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
1001
1002 if (unlikely(!rtlpriv))
1003 return;
1004 /*ieee80211_unregister_hw will call ops_stop */
1005 if (rtlmac->mac80211_registered == 1) {
1006 ieee80211_unregister_hw(hw);
1007 rtlmac->mac80211_registered = 0;
1008 } else {
1009 rtl_deinit_deferred_work(hw);
1010 rtlpriv->intf_ops->adapter_stop(hw);
1011 }
1012 /*deinit rfkill */
1013 /* rtl_deinit_rfkill(hw); */
1014 rtl_usb_deinit(hw);
1015 rtl_deinit_core(hw);
1016 rtlpriv->cfg->ops->deinit_sw_leds(hw);
1017 rtlpriv->cfg->ops->deinit_sw_vars(hw);
1018 _rtl_usb_io_handler_release(hw);
1019 usb_put_dev(rtlusb->udev);
1020 usb_set_intfdata(intf, NULL);
1021 ieee80211_free_hw(hw);
1022}
1023EXPORT_SYMBOL(rtl_usb_disconnect);
1024
1025int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message)
1026{
1027 return 0;
1028}
1029EXPORT_SYMBOL(rtl_usb_suspend);
1030
1031int rtl_usb_resume(struct usb_interface *pusb_intf)
1032{
1033 return 0;
1034}
1035EXPORT_SYMBOL(rtl_usb_resume);
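Editor's note: usb.c only exports the generic probe/disconnect/suspend/resume entry points; the actual binding to USB IDs happens in the chip-specific module. Below is a hedged sketch of that glue, using the RTL_USB_DEVICE() helper defined in usb.h further down. The vendor/product IDs, example_hal_cfg, and the driver name are placeholders, not values taken from this patch.

static struct rtl_hal_cfg example_hal_cfg;	/* would carry the chip's ops and USB interface config */

static struct usb_device_id example_usb_ids[] = {
	{RTL_USB_DEVICE(0x0bda, 0x1234, example_hal_cfg)},	/* placeholder VID/PID */
	{}
};
MODULE_DEVICE_TABLE(usb, example_usb_ids);

static struct usb_driver example_usb_driver = {
	.name       = "rtl_usb_example",
	.probe      = rtl_usb_probe,		/* exported above */
	.disconnect = rtl_usb_disconnect,
	.suspend    = rtl_usb_suspend,
	.resume     = rtl_usb_resume,
	.id_table   = example_usb_ids,
};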
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
new file mode 100644
index 00000000000..abadfe918d3
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -0,0 +1,164 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
28#ifndef __RTL_USB_H__
29#define __RTL_USB_H__
30
31#include <linux/usb.h>
32#include <linux/skbuff.h>
33
34#define RTL_USB_DEVICE(vend, prod, cfg) \
35 .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
36 .idVendor = (vend), \
37 .idProduct = (prod), \
38 .driver_info = (kernel_ulong_t)&(cfg)
39
40#define USB_HIGH_SPEED_BULK_SIZE 512
41#define USB_FULL_SPEED_BULK_SIZE 64
42
43
44#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
45#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
46#define RTL_USB_MAX_TX_URBS_NUM 8
47
48enum rtl_txq {
49 /* These definitions shall be consistent with value
50 * returned by skb_get_queue_mapping
51 *------------------------------------*/
52 RTL_TXQ_BK,
53 RTL_TXQ_BE,
54 RTL_TXQ_VI,
55 RTL_TXQ_VO,
56 /*------------------------------------*/
57 RTL_TXQ_BCN,
58 RTL_TXQ_MGT,
59 RTL_TXQ_HI,
60
61 /* Must be last */
62 __RTL_TXQ_NUM,
63};
64
65struct rtl_ep_map {
66 u32 ep_mapping[__RTL_TXQ_NUM];
67};
68
69struct _trx_info {
70 struct rtl_usb *rtlusb;
71 u32 ep_num;
72};
73
74static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb,
75 struct sk_buff *skb,
76 u32 ep_num)
77{
78 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
79 info->rate_driver_data[0] = rtlusb;
80 info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num;
81}
82
83
84/* Add suspend/resume later */
85enum rtl_usb_state {
86 USB_STATE_STOP = 0,
87 USB_STATE_START = 1,
88};
89
90#define IS_USB_STOP(rtlusb_ptr) (USB_STATE_STOP == (rtlusb_ptr)->state)
91#define IS_USB_START(rtlusb_ptr) (USB_STATE_START == (rtlusb_ptr)->state)
92#define SET_USB_STOP(rtlusb_ptr) \
93 do { \
94 (rtlusb_ptr)->state = USB_STATE_STOP; \
95 } while (0)
96
97#define SET_USB_START(rtlusb_ptr) \
98 do { \
99 (rtlusb_ptr)->state = USB_STATE_START; \
100 } while (0)
101
102struct rtl_usb {
103 struct usb_device *udev;
104 struct usb_interface *intf;
105 enum rtl_usb_state state;
106
107 /* Bcn control register setting */
108 u32 reg_bcn_ctrl_val;
109 /* for 88/92cu card disable */
110 u8 disableHWSM;
111 /*QOS & EDCA */
112 enum acm_method acm_method;
113 /* irq . HIMR,HIMR_EX */
114 u32 irq_mask[2];
115 bool irq_enabled;
116
117 u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
118
119 /* Tx */
 120	u8 out_ep_nums;
121 u8 out_queue_sel;
122 struct rtl_ep_map ep_map;
123
124 u32 max_bulk_out_size;
125 u32 tx_submitted_urbs;
126 struct sk_buff_head tx_skb_queue[RTL_USB_MAX_EP_NUM];
127
128 struct usb_anchor tx_pending[RTL_USB_MAX_EP_NUM];
129 struct usb_anchor tx_submitted;
130
131 struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
132 struct sk_buff_head *);
133 int (*usb_tx_post_hdl)(struct ieee80211_hw *,
134 struct urb *, struct sk_buff *);
135 void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
136
137 /* Rx */
 138	u8 in_ep_nums;
139 u32 in_ep; /* Bulk IN endpoint number */
140 u32 rx_max_size; /* Bulk IN max buffer size */
141 u32 rx_urb_num; /* How many Bulk INs are submitted to host. */
142 struct usb_anchor rx_submitted;
143 void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
144 struct sk_buff_head *);
145 void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
146};
147
148struct rtl_usb_priv {
149 struct rtl_usb dev;
150 struct rtl_led_ctl ledctl;
151};
152
153#define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
154#define rtl_usbdev(usbpriv) (&((usbpriv)->dev))
155
156
157
158int __devinit rtl_usb_probe(struct usb_interface *intf,
159 const struct usb_device_id *id);
160void rtl_usb_disconnect(struct usb_interface *intf);
161int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
162int rtl_usb_resume(struct usb_interface *pusb_intf);
163
164#endif
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index d44d79613d2..7d47184d6bf 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -34,6 +34,8 @@
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/version.h> 35#include <linux/version.h>
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <linux/vmalloc.h>
38#include <linux/usb.h>
37#include <net/mac80211.h> 39#include <net/mac80211.h>
38#include "debug.h" 40#include "debug.h"
39 41
@@ -82,6 +84,19 @@
82#define MAC80211_3ADDR_LEN 24 84#define MAC80211_3ADDR_LEN 24
83#define MAC80211_4ADDR_LEN 30 85#define MAC80211_4ADDR_LEN 30
84 86
87#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
88#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
89#define MAX_PG_GROUP 13
90#define CHANNEL_GROUP_MAX_2G 3
91#define CHANNEL_GROUP_IDX_5GL 3
92#define CHANNEL_GROUP_IDX_5GM 6
93#define CHANNEL_GROUP_IDX_5GH 9
94#define CHANNEL_GROUP_MAX_5G 9
95#define CHANNEL_MAX_NUMBER_2G 14
96#define AVG_THERMAL_NUM 8
97
98/* for early mode */
99#define EM_HDR_LEN 8
85enum intf_type { 100enum intf_type {
86 INTF_PCI = 0, 101 INTF_PCI = 0,
87 INTF_USB = 1, 102 INTF_USB = 1,
@@ -113,11 +128,38 @@ enum hardware_type {
113 HARDWARE_TYPE_RTL8192CU, 128 HARDWARE_TYPE_RTL8192CU,
114 HARDWARE_TYPE_RTL8192DE, 129 HARDWARE_TYPE_RTL8192DE,
115 HARDWARE_TYPE_RTL8192DU, 130 HARDWARE_TYPE_RTL8192DU,
131 HARDWARE_TYPE_RTL8723E,
132 HARDWARE_TYPE_RTL8723U,
116 133
117 /*keep it last*/ 134 /* keep it last */
118 HARDWARE_TYPE_NUM 135 HARDWARE_TYPE_NUM
119}; 136};
120 137
138#define IS_HARDWARE_TYPE_8192SU(rtlhal) \
139 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU)
140#define IS_HARDWARE_TYPE_8192SE(rtlhal) \
141 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
142#define IS_HARDWARE_TYPE_8192CE(rtlhal) \
143 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
144#define IS_HARDWARE_TYPE_8192CU(rtlhal) \
145 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU)
146#define IS_HARDWARE_TYPE_8192DE(rtlhal) \
147 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
148#define IS_HARDWARE_TYPE_8192DU(rtlhal) \
149 (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU)
150#define IS_HARDWARE_TYPE_8723E(rtlhal) \
151 (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E)
152#define IS_HARDWARE_TYPE_8723U(rtlhal) \
153 (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
154#define IS_HARDWARE_TYPE_8192S(rtlhal) \
155(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal))
156#define IS_HARDWARE_TYPE_8192C(rtlhal) \
157(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal))
158#define IS_HARDWARE_TYPE_8192D(rtlhal) \
159(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
160#define IS_HARDWARE_TYPE_8723(rtlhal) \
161(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
162
121enum scan_operation_backup_opt { 163enum scan_operation_backup_opt {
122 SCAN_OPT_BACKUP = 0, 164 SCAN_OPT_BACKUP = 0,
123 SCAN_OPT_RESTORE, 165 SCAN_OPT_RESTORE,
@@ -315,6 +357,7 @@ enum rf_type {
315 RF_1T1R = 0, 357 RF_1T1R = 0,
316 RF_1T2R = 1, 358 RF_1T2R = 1,
317 RF_2T2R = 2, 359 RF_2T2R = 2,
360 RF_2T2R_GREEN = 3,
318}; 361};
319 362
320enum ht_channel_width { 363enum ht_channel_width {
@@ -359,6 +402,8 @@ enum rtl_var_map {
359 EFUSE_LOADER_CLK_EN, 402 EFUSE_LOADER_CLK_EN,
360 EFUSE_ANA8M, 403 EFUSE_ANA8M,
361 EFUSE_HWSET_MAX_SIZE, 404 EFUSE_HWSET_MAX_SIZE,
405 EFUSE_MAX_SECTION_MAP,
406 EFUSE_REAL_CONTENT_SIZE,
362 407
363 /*CAM map */ 408 /*CAM map */
364 RWCAM, 409 RWCAM,
@@ -397,6 +442,7 @@ enum rtl_var_map {
397 RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */ 442 RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
398 RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrup */ 443 RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrup */
399 RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */ 444 RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
445 RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/
400 RTL_IMR_TBDOK, /*Transmit Beacon OK interrup */ 446 RTL_IMR_TBDOK, /*Transmit Beacon OK interrup */
401 RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */ 447 RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
402 RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */ 448 RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */
@@ -405,7 +451,8 @@ enum rtl_var_map {
405 RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */ 451 RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
406 RTL_IMR_VODOK, /*AC_VO DMA Interrupt */ 452 RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
407 RTL_IMR_ROK, /*Receive DMA OK Interrupt */ 453 RTL_IMR_ROK, /*Receive DMA OK Interrupt */
408 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt|RTL_IMR_TBDOK|RTL_IMR_TBDER)*/ 454 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
455 * RTL_IMR_TBDER) */
409 456
410 /*CCK Rates, TxHT = 0 */ 457 /*CCK Rates, TxHT = 0 */
411 RTL_RC_CCK_RATE1M, 458 RTL_RC_CCK_RATE1M,
@@ -481,6 +528,19 @@ enum acm_method {
481 eAcmWay2_SW = 2, 528 eAcmWay2_SW = 2,
482}; 529};
483 530
531enum macphy_mode {
532 SINGLEMAC_SINGLEPHY = 0,
533 DUALMAC_DUALPHY,
534 DUALMAC_SINGLEPHY,
535};
536
537enum band_type {
538 BAND_ON_2_4G = 0,
539 BAND_ON_5G,
540 BAND_ON_BOTH,
541 BANDMAX
542};
543
484/*aci/aifsn Field. 544/*aci/aifsn Field.
485Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/ 545Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
486union aci_aifsn { 546union aci_aifsn {
@@ -505,6 +565,17 @@ enum wireless_mode {
505 WIRELESS_MODE_N_5G = 0x20 565 WIRELESS_MODE_N_5G = 0x20
506}; 566};
507 567
568#define IS_WIRELESS_MODE_A(wirelessmode) \
569 (wirelessmode == WIRELESS_MODE_A)
570#define IS_WIRELESS_MODE_B(wirelessmode) \
571 (wirelessmode == WIRELESS_MODE_B)
572#define IS_WIRELESS_MODE_G(wirelessmode) \
573 (wirelessmode == WIRELESS_MODE_G)
574#define IS_WIRELESS_MODE_N_24G(wirelessmode) \
575 (wirelessmode == WIRELESS_MODE_N_24G)
576#define IS_WIRELESS_MODE_N_5G(wirelessmode) \
577 (wirelessmode == WIRELESS_MODE_N_5G)
578
508enum ratr_table_mode { 579enum ratr_table_mode {
509 RATR_INX_WIRELESS_NGB = 0, 580 RATR_INX_WIRELESS_NGB = 0,
510 RATR_INX_WIRELESS_NG = 1, 581 RATR_INX_WIRELESS_NG = 1,
@@ -574,11 +645,11 @@ struct rtl_probe_rsp {
574struct rtl_led { 645struct rtl_led {
575 void *hw; 646 void *hw;
576 enum rtl_led_pin ledpin; 647 enum rtl_led_pin ledpin;
577 bool b_ledon; 648 bool ledon;
578}; 649};
579 650
580struct rtl_led_ctl { 651struct rtl_led_ctl {
581 bool bled_opendrain; 652 bool led_opendrain;
582 struct rtl_led sw_led0; 653 struct rtl_led sw_led0;
583 struct rtl_led sw_led1; 654 struct rtl_led sw_led1;
584}; 655};
@@ -603,6 +674,8 @@ struct false_alarm_statistics {
603 u32 cnt_rate_illegal; 674 u32 cnt_rate_illegal;
604 u32 cnt_crc8_fail; 675 u32 cnt_crc8_fail;
605 u32 cnt_mcs_fail; 676 u32 cnt_mcs_fail;
677 u32 cnt_fast_fsync_fail;
678 u32 cnt_sb_search_fail;
606 u32 cnt_ofdm_fail; 679 u32 cnt_ofdm_fail;
607 u32 cnt_cck_fail; 680 u32 cnt_cck_fail;
608 u32 cnt_all; 681 u32 cnt_all;
@@ -690,6 +763,32 @@ struct rtl_rfkill {
690 bool rfkill_state; /*0 is off, 1 is on */ 763 bool rfkill_state; /*0 is off, 1 is on */
691}; 764};
692 765
766#define IQK_MATRIX_REG_NUM 8
767#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21)
768struct iqk_matrix_regs {
769 bool b_iqk_done;
770 long value[1][IQK_MATRIX_REG_NUM];
771};
772
773struct phy_parameters {
774 u16 length;
775 u32 *pdata;
776};
777
778enum hw_param_tab_index {
779 PHY_REG_2T,
780 PHY_REG_1T,
781 PHY_REG_PG,
782 RADIOA_2T,
783 RADIOB_2T,
784 RADIOA_1T,
785 RADIOB_1T,
786 MAC_REG,
787 AGCTAB_2T,
788 AGCTAB_1T,
789 MAX_TAB
790};
791
693struct rtl_phy { 792struct rtl_phy {
694 struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */ 793 struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
695 struct init_gain initgain_backup; 794 struct init_gain initgain_backup;
@@ -705,8 +804,9 @@ struct rtl_phy {
705 u8 current_channel; 804 u8 current_channel;
706 u8 h2c_box_num; 805 u8 h2c_box_num;
707 u8 set_io_inprogress; 806 u8 set_io_inprogress;
807 u8 lck_inprogress;
708 808
709 /*record for power tracking*/ 809 /* record for power tracking */
710 s32 reg_e94; 810 s32 reg_e94;
711 s32 reg_e9c; 811 s32 reg_e9c;
712 s32 reg_ea4; 812 s32 reg_ea4;
@@ -723,26 +823,32 @@ struct rtl_phy {
723 u32 iqk_mac_backup[IQK_MAC_REG_NUM]; 823 u32 iqk_mac_backup[IQK_MAC_REG_NUM];
724 u32 iqk_bb_backup[10]; 824 u32 iqk_bb_backup[10];
725 825
726 bool b_rfpi_enable; 826 /* Dual mac */
827 bool need_iqk;
828 struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM];
829
830 bool rfpi_enable;
727 831
728 u8 pwrgroup_cnt; 832 u8 pwrgroup_cnt;
729 u8 bcck_high_power; 833 u8 cck_high_power;
730 /* 3 groups of pwr diff by rates*/ 834 /* MAX_PG_GROUP groups of pwr diff by rates */
731 u32 mcs_txpwrlevel_origoffset[4][16]; 835 u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16];
732 u8 default_initialgain[4]; 836 u8 default_initialgain[4];
733 837
734 /*the current Tx power level*/ 838 /* the current Tx power level */
735 u8 cur_cck_txpwridx; 839 u8 cur_cck_txpwridx;
736 u8 cur_ofdm24g_txpwridx; 840 u8 cur_ofdm24g_txpwridx;
737 841
738 u32 rfreg_chnlval[2]; 842 u32 rfreg_chnlval[2];
739 bool b_apk_done; 843 bool apk_done;
844 u32 reg_rf3c[2]; /* pathA / pathB */
740 845
741 /*fsync*/
742 u8 framesync; 846 u8 framesync;
743 u32 framesync_c34; 847 u32 framesync_c34;
744 848
745 u8 num_total_rfpath; 849 u8 num_total_rfpath;
850 struct phy_parameters hwparam_tables[MAX_TAB];
851 u16 rf_pathmap;
746}; 852};
747 853
748#define MAX_TID_COUNT 9 854#define MAX_TID_COUNT 9
@@ -768,6 +874,7 @@ struct rtl_tid_data {
768struct rtl_priv; 874struct rtl_priv;
769struct rtl_io { 875struct rtl_io {
770 struct device *dev; 876 struct device *dev;
877 struct mutex bb_mutex;
771 878
772 /*PCI MEM map */ 879 /*PCI MEM map */
773 unsigned long pci_mem_end; /*shared mem end */ 880 unsigned long pci_mem_end; /*shared mem end */
@@ -779,11 +886,14 @@ struct rtl_io {
779 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); 886 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
780 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val); 887 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
781 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val); 888 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
782 889 int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
783 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); 890 u8 *pdata);
784 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); 891
785 u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr); 892 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
786 893 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
894 u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
895 int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len,
896 u8 *pdata);
787}; 897};
788 898
789struct rtl_mac { 899struct rtl_mac {
@@ -815,16 +925,24 @@ struct rtl_mac {
815 bool act_scanning; 925 bool act_scanning;
816 u8 cnt_after_linked; 926 u8 cnt_after_linked;
817 927
818 /*RDG*/ bool rdg_en; 928 /* early mode */
929 /* skb wait queue */
930 struct sk_buff_head skb_waitq[MAX_TID_COUNT];
931 u8 earlymode_threshold;
932
933 /*RDG*/
934 bool rdg_en;
819 935
820 /*AP*/ u8 bssid[6]; 936 /*AP*/
821 u8 mcs[16]; /*16 bytes mcs for HT rates.*/ 937 u8 bssid[6];
822 u32 basic_rates; /*b/g rates*/ 938 u32 vendor;
939 u8 mcs[16]; /* 16 bytes mcs for HT rates. */
940 u32 basic_rates; /* b/g rates */
823 u8 ht_enable; 941 u8 ht_enable;
824 u8 sgi_40; 942 u8 sgi_40;
825 u8 sgi_20; 943 u8 sgi_20;
826 u8 bw_40; 944 u8 bw_40;
827 u8 mode; /*wireless mode*/ 945 u8 mode; /* wireless mode */
828 u8 slot_time; 946 u8 slot_time;
829 u8 short_preamble; 947 u8 short_preamble;
830 u8 use_cts_protect; 948 u8 use_cts_protect;
@@ -835,9 +953,11 @@ struct rtl_mac {
835 u8 retry_long; 953 u8 retry_long;
836 u16 assoc_id; 954 u16 assoc_id;
837 955
838 /*IBSS*/ int beacon_interval; 956 /*IBSS*/
957 int beacon_interval;
839 958
840 /*AMPDU*/ u8 min_space_cfg; /*For Min spacing configurations */ 959 /*AMPDU*/
960 u8 min_space_cfg; /*For Min spacing configurations */
841 u8 max_mss_density; 961 u8 max_mss_density;
842 u8 current_ampdu_factor; 962 u8 current_ampdu_factor;
843 u8 current_ampdu_density; 963 u8 current_ampdu_density;
@@ -852,17 +972,54 @@ struct rtl_hal {
852 972
853 enum intf_type interface; 973 enum intf_type interface;
854 u16 hw_type; /*92c or 92d or 92s and so on */ 974 u16 hw_type; /*92c or 92d or 92s and so on */
975 u8 ic_class;
855 u8 oem_id; 976 u8 oem_id;
856 u8 version; /*version of chip */ 977 u32 version; /*version of chip */
857 u8 state; /*stop 0, start 1 */ 978 u8 state; /*stop 0, start 1 */
858 979
859 /*firmware */ 980 /*firmware */
981 u32 fwsize;
860 u8 *pfirmware; 982 u8 *pfirmware;
861 bool b_h2c_setinprogress; 983 u16 fw_version;
984 u16 fw_subversion;
985 bool h2c_setinprogress;
862 u8 last_hmeboxnum; 986 u8 last_hmeboxnum;
863 bool bfw_ready; 987 bool fw_ready;
864 /*Reserve page start offset except beacon in TxQ. */ 988 /*Reserve page start offset except beacon in TxQ. */
865 u8 fw_rsvdpage_startoffset; 989 u8 fw_rsvdpage_startoffset;
990 u8 h2c_txcmd_seq;
991
992 /* FW Cmd IO related */
993 u16 fwcmd_iomap;
994 u32 fwcmd_ioparam;
995 bool set_fwcmd_inprogress;
996 u8 current_fwcmd_io;
997
998 /**/
999 bool driver_going2unload;
1000
1001 /*AMPDU init min space*/
1002 u8 minspace_cfg; /*For Min spacing configurations */
1003
1004 /* Dual mac */
1005 enum macphy_mode macphymode;
1006 enum band_type current_bandtype; /* 0:2.4G, 1:5G */
1007 enum band_type current_bandtypebackup;
1008 enum band_type bandset;
1009 /* dual MAC 0--Mac0 1--Mac1 */
1010 u32 interfaceindex;
1011 /* just for DualMac S3S4 */
1012 u8 macphyctl_reg;
1013 bool earlymode_enable;
1014 /* Dual mac*/
1015 bool during_mac0init_radiob;
1016 bool during_mac1init_radioa;
1017 bool reloadtxpowerindex;
1018	/* True if IMR or IQK has been done
1019	   for 2.4G in scan progress */
1020 bool load_imrandiqk_setting_for2g;
1021
1022 bool disable_amsdu_8k;
866}; 1023};
867 1024
868struct rtl_security { 1025struct rtl_security {
@@ -887,48 +1044,61 @@ struct rtl_security {
887}; 1044};
888 1045
889struct rtl_dm { 1046struct rtl_dm {
890 /*PHY status for DM */ 1047 /*PHY status for Dynamic Management */
891 long entry_min_undecoratedsmoothed_pwdb; 1048 long entry_min_undecoratedsmoothed_pwdb;
892 long undecorated_smoothed_pwdb; /*out dm */ 1049 long undecorated_smoothed_pwdb; /*out dm */
893 long entry_max_undecoratedsmoothed_pwdb; 1050 long entry_max_undecoratedsmoothed_pwdb;
894 bool b_dm_initialgain_enable; 1051 bool dm_initialgain_enable;
895 bool bdynamic_txpower_enable; 1052 bool dynamic_txpower_enable;
896 bool bcurrent_turbo_edca; 1053 bool current_turbo_edca;
897 bool bis_any_nonbepkts; /*out dm */ 1054 bool is_any_nonbepkts; /*out dm */
898 bool bis_cur_rdlstate; 1055 bool is_cur_rdlstate;
899 bool btxpower_trackingInit; 1056 bool txpower_trackingInit;
900 bool b_disable_framebursting; 1057 bool disable_framebursting;
901 bool b_cck_inch14; 1058 bool cck_inch14;
902 bool btxpower_tracking; 1059 bool txpower_tracking;
903 bool b_useramask; 1060 bool useramask;
904 bool brfpath_rxenable[4]; 1061 bool rfpath_rxenable[4];
905 1062 bool inform_fw_driverctrldm;
1063 bool current_mrc_switch;
1064 u8 txpowercount;
1065
1066 u8 thermalvalue_rxgain;
906 u8 thermalvalue_iqk; 1067 u8 thermalvalue_iqk;
907 u8 thermalvalue_lck; 1068 u8 thermalvalue_lck;
908 u8 thermalvalue; 1069 u8 thermalvalue;
909 u8 last_dtp_lvl; 1070 u8 last_dtp_lvl;
1071 u8 thermalvalue_avg[AVG_THERMAL_NUM];
1072 u8 thermalvalue_avg_index;
1073 bool done_txpower;
910 u8 dynamic_txhighpower_lvl; /*Tx high power level */ 1074 u8 dynamic_txhighpower_lvl; /*Tx high power level */
911 u8 dm_flag; /*Indicate if each dynamic mechanism's status. */ 1075 u8 dm_flag; /*Indicate each dynamic mechanism's status. */
912 u8 dm_type; 1076 u8 dm_type;
913 u8 txpower_track_control; 1077 u8 txpower_track_control;
914 1078 bool interrupt_migration;
1079 bool disable_tx_int;
915 char ofdm_index[2]; 1080 char ofdm_index[2];
916 char cck_index; 1081 char cck_index;
1082 u8 power_index_backup[6];
917}; 1083};
918 1084
919#define EFUSE_MAX_LOGICAL_SIZE 128 1085#define EFUSE_MAX_LOGICAL_SIZE 256
920 1086
921struct rtl_efuse { 1087struct rtl_efuse {
922 bool bautoLoad_ok; 1088 bool autoLoad_ok;
923 bool bootfromefuse; 1089 bool bootfromefuse;
924 u16 max_physical_size; 1090 u16 max_physical_size;
925 u8 contents[EFUSE_MAX_LOGICAL_SIZE];
926 1091
927 u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE]; 1092 u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
928 u16 efuse_usedbytes; 1093 u16 efuse_usedbytes;
929 u8 efuse_usedpercentage; 1094 u8 efuse_usedpercentage;
1095#ifdef EFUSE_REPG_WORKAROUND
1096 bool efuse_re_pg_sec1flag;
1097 u8 efuse_re_pg_data[8];
1098#endif
930 1099
931 u8 autoload_failflag; 1100 u8 autoload_failflag;
1101 u8 autoload_status;
932 1102
933 short epromtype; 1103 short epromtype;
934 u16 eeprom_vid; 1104 u16 eeprom_vid;
@@ -938,69 +1108,90 @@ struct rtl_efuse {
938 u8 eeprom_oemid; 1108 u8 eeprom_oemid;
939 u16 eeprom_channelplan; 1109 u16 eeprom_channelplan;
940 u8 eeprom_version; 1110 u8 eeprom_version;
1111 u8 board_type;
1112 u8 external_pa;
941 1113
942 u8 dev_addr[6]; 1114 u8 dev_addr[6];
943 1115
944 bool b_txpwr_fromeprom; 1116 bool txpwr_fromeprom;
1117 u8 eeprom_crystalcap;
945 u8 eeprom_tssi[2]; 1118 u8 eeprom_tssi[2];
946 u8 eeprom_pwrlimit_ht20[3]; 1119 u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
947 u8 eeprom_pwrlimit_ht40[3]; 1120 u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
948 u8 eeprom_chnlarea_txpwr_cck[2][3]; 1121 u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
949 u8 eeprom_chnlarea_txpwr_ht40_1s[2][3]; 1122 u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
950 u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][3]; 1123 u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
951 u8 txpwrlevel_cck[2][14]; 1124 u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX];
952 u8 txpwrlevel_ht40_1s[2][14]; /*For HT 40MHZ pwr */ 1125 u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
953 u8 txpwrlevel_ht40_2s[2][14]; /*For HT 40MHZ pwr */ 1126 u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
1127 u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
1128
1129 u8 internal_pa_5g[2]; /* pathA / pathB */
1130 u8 eeprom_c9;
1131 u8 eeprom_cc;
954 1132
955 /*For power group */ 1133 /*For power group */
956 u8 pwrgroup_ht20[2][14]; 1134 u8 eeprom_pwrgroup[2][3];
957 u8 pwrgroup_ht40[2][14]; 1135 u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
958 1136 u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
959 char txpwr_ht20diff[2][14]; /*HT 20<->40 Pwr diff */ 1137
960 u8 txpwr_legacyhtdiff[2][14]; /*For HT<->legacy pwr diff */ 1138 char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
1139 /*For HT<->legacy pwr diff*/
1140 u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
1141 u8 txpwr_safetyflag; /* Band edge enable flag */
1142 u16 eeprom_txpowerdiff;
1143 u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
1144 u8 antenna_txpwdiff[3];
961 1145
962 u8 eeprom_regulatory; 1146 u8 eeprom_regulatory;
963 u8 eeprom_thermalmeter; 1147 u8 eeprom_thermalmeter;
964 /*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */ 1148 u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */
965 u8 thermalmeter[2]; 1149 u16 tssi_13dbm;
1150 u8 crystalcap; /* CrystalCap. */
1151 u8 delta_iqk;
1152 u8 delta_lck;
966 1153
967 u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */ 1154 u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
968 bool b_apk_thermalmeterignore; 1155 bool apk_thermalmeterignore;
1156
1157 bool b1x1_recvcombine;
1158 bool b1ss_support;
1159
1160 /*channel plan */
1161 u8 channel_plan;
969}; 1162};
970 1163
971struct rtl_ps_ctl { 1164struct rtl_ps_ctl {
1165 bool pwrdomain_protect;
972 bool set_rfpowerstate_inprogress; 1166 bool set_rfpowerstate_inprogress;
973 bool b_in_powersavemode; 1167 bool in_powersavemode;
974 bool rfchange_inprogress; 1168 bool rfchange_inprogress;
975 bool b_swrf_processing; 1169 bool swrf_processing;
976 bool b_hwradiooff; 1170 bool hwradiooff;
977
978 u32 last_sleep_jiffies;
979 u32 last_awake_jiffies;
980 u32 last_delaylps_stamp_jiffies;
981 1171
982 /* 1172 /*
983 * just for PCIE ASPM 1173 * just for PCIE ASPM
984 * If it supports ASPM, Offset[560h] = 0x40, 1174 * If it supports ASPM, Offset[560h] = 0x40,
985 * otherwise Offset[560h] = 0x00. 1175 * otherwise Offset[560h] = 0x00.
986 * */ 1176 * */
987 bool b_support_aspm; 1177 bool support_aspm;
988 bool b_support_backdoor; 1178 bool support_backdoor;
989 1179
990 /*for LPS */ 1180 /*for LPS */
991 enum rt_psmode dot11_psmode; /*Power save mode configured. */ 1181 enum rt_psmode dot11_psmode; /*Power save mode configured. */
992 bool b_leisure_ps; 1182 bool swctrl_lps;
993 bool b_fwctrl_lps; 1183 bool leisure_ps;
1184 bool fwctrl_lps;
994 u8 fwctrl_psmode; 1185 u8 fwctrl_psmode;
995 /*For Fw control LPS mode */ 1186 /*For Fw control LPS mode */
996 u8 b_reg_fwctrl_lps; 1187 u8 reg_fwctrl_lps;
997 /*Record Fw PS mode status. */ 1188 /*Record Fw PS mode status. */
998 bool b_fw_current_inpsmode; 1189 bool fw_current_inpsmode;
999 u8 reg_max_lps_awakeintvl; 1190 u8 reg_max_lps_awakeintvl;
1000 bool report_linked; 1191 bool report_linked;
1001 1192
1002 /*for IPS */ 1193 /*for IPS */
1003 bool b_inactiveps; 1194 bool inactiveps;
1004 1195
1005 u32 rfoff_reason; 1196 u32 rfoff_reason;
1006 1197
@@ -1011,8 +1202,26 @@ struct rtl_ps_ctl {
1011 /*just for PCIE ASPM */ 1202 /*just for PCIE ASPM */
1012 u8 const_amdpci_aspm; 1203 u8 const_amdpci_aspm;
1013 1204
1205 bool pwrdown_mode;
1206
1014 enum rf_pwrstate inactive_pwrstate; 1207 enum rf_pwrstate inactive_pwrstate;
1015 enum rf_pwrstate rfpwr_state; /*cur power state */ 1208 enum rf_pwrstate rfpwr_state; /*cur power state */
1209
1210 /* for SW LPS*/
1211 bool sw_ps_enabled;
1212 bool state;
1213 bool state_inap;
1214 bool multi_buffered;
1215 u16 nullfunc_seq;
1216 unsigned int dtim_counter;
1217 unsigned int sleep_ms;
1218 unsigned long last_sleep_jiffies;
1219 unsigned long last_awake_jiffies;
1220 unsigned long last_delaylps_stamp_jiffies;
1221 unsigned long last_dtim;
1222 unsigned long last_beacon;
1223 unsigned long last_action;
1224 unsigned long last_slept;
1016}; 1225};
1017 1226
1018struct rtl_stats { 1227struct rtl_stats {
@@ -1038,10 +1247,10 @@ struct rtl_stats {
1038 s32 recvsignalpower; 1247 s32 recvsignalpower;
1039 s8 rxpower; /*in dBm Translate from PWdB */ 1248 s8 rxpower; /*in dBm Translate from PWdB */
1040 u8 signalstrength; /*in 0-100 index. */ 1249 u8 signalstrength; /*in 0-100 index. */
1041 u16 b_hwerror:1; 1250 u16 hwerror:1;
1042 u16 b_crc:1; 1251 u16 crc:1;
1043 u16 b_icv:1; 1252 u16 icv:1;
1044 u16 b_shortpreamble:1; 1253 u16 shortpreamble:1;
1045 u16 antenna:1; 1254 u16 antenna:1;
1046 u16 decrypted:1; 1255 u16 decrypted:1;
1047 u16 wakeup:1; 1256 u16 wakeup:1;
@@ -1050,15 +1259,16 @@ struct rtl_stats {
1050 1259
1051 u8 rx_drvinfo_size; 1260 u8 rx_drvinfo_size;
1052 u8 rx_bufshift; 1261 u8 rx_bufshift;
1053 bool b_isampdu; 1262 bool isampdu;
1263 bool isfirst_ampdu;
1054 bool rx_is40Mhzpacket; 1264 bool rx_is40Mhzpacket;
1055 u32 rx_pwdb_all; 1265 u32 rx_pwdb_all;
1056 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ 1266 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
1057 s8 rx_mimo_signalquality[2]; 1267 s8 rx_mimo_signalquality[2];
1058 bool b_packet_matchbssid; 1268 bool packet_matchbssid;
1059 bool b_is_cck; 1269 bool is_cck;
1060 bool b_packet_toself; 1270 bool packet_toself;
1061 bool b_packet_beacon; /*for rssi */ 1271 bool packet_beacon; /*for rssi */
1062 char cck_adc_pwdb[4]; /*for rx path selection */ 1272 char cck_adc_pwdb[4]; /*for rx path selection */
1063}; 1273};
1064 1274
@@ -1069,23 +1279,23 @@ struct rt_link_detect {
1069 u32 num_tx_inperiod; 1279 u32 num_tx_inperiod;
1070 u32 num_rx_inperiod; 1280 u32 num_rx_inperiod;
1071 1281
1072 bool b_busytraffic; 1282 bool busytraffic;
1073 bool b_higher_busytraffic; 1283 bool higher_busytraffic;
1074 bool b_higher_busyrxtraffic; 1284 bool higher_busyrxtraffic;
1075}; 1285};
1076 1286
1077struct rtl_tcb_desc { 1287struct rtl_tcb_desc {
1078 u8 b_packet_bw:1; 1288 u8 packet_bw:1;
1079 u8 b_multicast:1; 1289 u8 multicast:1;
1080 u8 b_broadcast:1; 1290 u8 broadcast:1;
1081 1291
1082 u8 b_rts_stbc:1; 1292 u8 rts_stbc:1;
1083 u8 b_rts_enable:1; 1293 u8 rts_enable:1;
1084 u8 b_cts_enable:1; 1294 u8 cts_enable:1;
1085 u8 b_rts_use_shortpreamble:1; 1295 u8 rts_use_shortpreamble:1;
1086 u8 b_rts_use_shortgi:1; 1296 u8 rts_use_shortgi:1;
1087 u8 rts_sc:1; 1297 u8 rts_sc:1;
1088 u8 b_rts_bw:1; 1298 u8 rts_bw:1;
1089 u8 rts_rate; 1299 u8 rts_rate;
1090 1300
1091 u8 use_shortgi:1; 1301 u8 use_shortgi:1;
@@ -1096,20 +1306,34 @@ struct rtl_tcb_desc {
1096 u8 ratr_index; 1306 u8 ratr_index;
1097 u8 mac_id; 1307 u8 mac_id;
1098 u8 hw_rate; 1308 u8 hw_rate;
1309
1310 u8 last_inipkt:1;
1311 u8 cmd_or_init:1;
1312 u8 queue_index;
1313
1314 /* early mode */
1315 u8 empkt_num;
1316 /* The max value by HW */
1317 u32 empkt_len[5];
1099}; 1318};
1100 1319
1101struct rtl_hal_ops { 1320struct rtl_hal_ops {
1102 int (*init_sw_vars) (struct ieee80211_hw *hw); 1321 int (*init_sw_vars) (struct ieee80211_hw *hw);
1103 void (*deinit_sw_vars) (struct ieee80211_hw *hw); 1322 void (*deinit_sw_vars) (struct ieee80211_hw *hw);
1323 void (*read_chip_version)(struct ieee80211_hw *hw);
1104 void (*read_eeprom_info) (struct ieee80211_hw *hw); 1324 void (*read_eeprom_info) (struct ieee80211_hw *hw);
1105 void (*interrupt_recognized) (struct ieee80211_hw *hw, 1325 void (*interrupt_recognized) (struct ieee80211_hw *hw,
1106 u32 *p_inta, u32 *p_intb); 1326 u32 *p_inta, u32 *p_intb);
1107 int (*hw_init) (struct ieee80211_hw *hw); 1327 int (*hw_init) (struct ieee80211_hw *hw);
1108 void (*hw_disable) (struct ieee80211_hw *hw); 1328 void (*hw_disable) (struct ieee80211_hw *hw);
1329 void (*hw_suspend) (struct ieee80211_hw *hw);
1330 void (*hw_resume) (struct ieee80211_hw *hw);
1109 void (*enable_interrupt) (struct ieee80211_hw *hw); 1331 void (*enable_interrupt) (struct ieee80211_hw *hw);
1110 void (*disable_interrupt) (struct ieee80211_hw *hw); 1332 void (*disable_interrupt) (struct ieee80211_hw *hw);
1111 int (*set_network_type) (struct ieee80211_hw *hw, 1333 int (*set_network_type) (struct ieee80211_hw *hw,
1112 enum nl80211_iftype type); 1334 enum nl80211_iftype type);
1335 void (*set_chk_bssid)(struct ieee80211_hw *hw,
1336 bool check_bssid);
1113 void (*set_bw_mode) (struct ieee80211_hw *hw, 1337 void (*set_bw_mode) (struct ieee80211_hw *hw,
1114 enum nl80211_channel_type ch_type); 1338 enum nl80211_channel_type ch_type);
1115 u8(*switch_channel) (struct ieee80211_hw *hw); 1339 u8(*switch_channel) (struct ieee80211_hw *hw);
@@ -1126,23 +1350,26 @@ struct rtl_hal_ops {
1126 struct ieee80211_hdr *hdr, u8 *pdesc_tx, 1350 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1127 struct ieee80211_tx_info *info, 1351 struct ieee80211_tx_info *info,
1128 struct sk_buff *skb, unsigned int queue_index); 1352 struct sk_buff *skb, unsigned int queue_index);
1353 void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 * pDesc,
1354 u32 buffer_len, bool bIsPsPoll);
1129 void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc, 1355 void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
1130 bool b_firstseg, bool b_lastseg, 1356 bool firstseg, bool lastseg,
1131 struct sk_buff *skb); 1357 struct sk_buff *skb);
1132 bool(*query_rx_desc) (struct ieee80211_hw *hw, 1358 bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb);
1359 bool (*query_rx_desc) (struct ieee80211_hw *hw,
1133 struct rtl_stats *stats, 1360 struct rtl_stats *stats,
1134 struct ieee80211_rx_status *rx_status, 1361 struct ieee80211_rx_status *rx_status,
1135 u8 *pdesc, struct sk_buff *skb); 1362 u8 *pdesc, struct sk_buff *skb);
1136 void (*set_channel_access) (struct ieee80211_hw *hw); 1363 void (*set_channel_access) (struct ieee80211_hw *hw);
1137 bool(*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid); 1364 bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
1138 void (*dm_watchdog) (struct ieee80211_hw *hw); 1365 void (*dm_watchdog) (struct ieee80211_hw *hw);
1139 void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation); 1366 void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
1140 bool(*set_rf_power_state) (struct ieee80211_hw *hw, 1367 bool (*set_rf_power_state) (struct ieee80211_hw *hw,
1141 enum rf_pwrstate rfpwr_state); 1368 enum rf_pwrstate rfpwr_state);
1142 void (*led_control) (struct ieee80211_hw *hw, 1369 void (*led_control) (struct ieee80211_hw *hw,
1143 enum led_ctl_mode ledaction); 1370 enum led_ctl_mode ledaction);
1144 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val); 1371 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
1145 u32(*get_desc) (u8 *pdesc, bool istx, u8 desc_name); 1372 u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
1146 void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue); 1373 void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue);
1147 void (*enable_hw_sec) (struct ieee80211_hw *hw); 1374 void (*enable_hw_sec) (struct ieee80211_hw *hw);
1148 void (*set_key) (struct ieee80211_hw *hw, u32 key_index, 1375 void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1150,10 +1377,10 @@ struct rtl_hal_ops {
1150 bool is_wepkey, bool clear_all); 1377 bool is_wepkey, bool clear_all);
1151 void (*init_sw_leds) (struct ieee80211_hw *hw); 1378 void (*init_sw_leds) (struct ieee80211_hw *hw);
1152 void (*deinit_sw_leds) (struct ieee80211_hw *hw); 1379 void (*deinit_sw_leds) (struct ieee80211_hw *hw);
1153 u32(*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); 1380 u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
1154 void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, 1381 void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
1155 u32 data); 1382 u32 data);
1156 u32(*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1383 u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1157 u32 regaddr, u32 bitmask); 1384 u32 regaddr, u32 bitmask);
1158 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 1385 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1159 u32 regaddr, u32 bitmask, u32 data); 1386 u32 regaddr, u32 bitmask, u32 data);
@@ -1161,11 +1388,13 @@ struct rtl_hal_ops {
1161 1388
1162struct rtl_intf_ops { 1389struct rtl_intf_ops {
1163 /*com */ 1390 /*com */
1391 void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
1164 int (*adapter_start) (struct ieee80211_hw *hw); 1392 int (*adapter_start) (struct ieee80211_hw *hw);
1165 void (*adapter_stop) (struct ieee80211_hw *hw); 1393 void (*adapter_stop) (struct ieee80211_hw *hw);
1166 1394
1167 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb); 1395 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb);
1168 int (*reset_trx_ring) (struct ieee80211_hw *hw); 1396 int (*reset_trx_ring) (struct ieee80211_hw *hw);
1397 bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb);
1169 1398
1170 /*pci */ 1399 /*pci */
1171 void (*disable_aspm) (struct ieee80211_hw *hw); 1400 void (*disable_aspm) (struct ieee80211_hw *hw);
@@ -1179,11 +1408,36 @@ struct rtl_mod_params {
1179 int sw_crypto; 1408 int sw_crypto;
1180}; 1409};
1181 1410
1411struct rtl_hal_usbint_cfg {
1412 /* data - rx */
1413 u32 in_ep_num;
1414 u32 rx_urb_num;
1415 u32 rx_max_size;
1416
1417 /* op - rx */
1418 void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *);
1419 void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *,
1420 struct sk_buff_head *);
1421
1422 /* tx */
1423 void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *);
1424 int (*usb_tx_post_hdl)(struct ieee80211_hw *, struct urb *,
1425 struct sk_buff *);
1426 struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *,
1427 struct sk_buff_head *);
1428
1429 /* endpoint mapping */
1430 int (*usb_endpoint_mapping)(struct ieee80211_hw *hw);
1431 u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index);
1432};
1433
1182struct rtl_hal_cfg { 1434struct rtl_hal_cfg {
1435 u8 bar_id;
1183 char *name; 1436 char *name;
1184 char *fw_name; 1437 char *fw_name;
1185 struct rtl_hal_ops *ops; 1438 struct rtl_hal_ops *ops;
1186 struct rtl_mod_params *mod_params; 1439 struct rtl_mod_params *mod_params;
1440 struct rtl_hal_usbint_cfg *usb_interface_cfg;
1187 1441
1188 /*this map used for some registers or vars 1442 /*this map used for some registers or vars
1189 defined int HAL but used in MAIN */ 1443 defined int HAL but used in MAIN */
@@ -1202,6 +1456,11 @@ struct rtl_locks {
1202 spinlock_t rf_ps_lock; 1456 spinlock_t rf_ps_lock;
1203 spinlock_t rf_lock; 1457 spinlock_t rf_lock;
1204 spinlock_t lps_lock; 1458 spinlock_t lps_lock;
1459 spinlock_t waitq_lock;
1460 spinlock_t tx_urb_lock;
1461
1462 /*Dual mac*/
1463 spinlock_t cck_and_rw_pagea_lock;
1205}; 1464};
1206 1465
1207struct rtl_works { 1466struct rtl_works {
@@ -1218,12 +1477,20 @@ struct rtl_works {
1218 struct workqueue_struct *rtl_wq; 1477 struct workqueue_struct *rtl_wq;
1219 struct delayed_work watchdog_wq; 1478 struct delayed_work watchdog_wq;
1220 struct delayed_work ips_nic_off_wq; 1479 struct delayed_work ips_nic_off_wq;
1480
1481 /* For SW LPS */
1482 struct delayed_work ps_work;
1483 struct delayed_work ps_rfon_wq;
1221}; 1484};
1222 1485
1223struct rtl_debug { 1486struct rtl_debug {
1224 u32 dbgp_type[DBGP_TYPE_MAX]; 1487 u32 dbgp_type[DBGP_TYPE_MAX];
1225 u32 global_debuglevel; 1488 u32 global_debuglevel;
1226 u64 global_debugcomponents; 1489 u64 global_debugcomponents;
1490
1491 /* add for proc debug */
1492 struct proc_dir_entry *proc_dir;
1493 char proc_name[20];
1227}; 1494};
1228 1495
1229struct rtl_priv { 1496struct rtl_priv {
@@ -1274,6 +1541,91 @@ struct rtl_priv {
1274#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse)) 1541#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
1275#define rtl_psc(rtlpriv) (&((rtlpriv)->psc)) 1542#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
1276 1543
1544
1545/***************************************
1546 Bluetooth Co-existence Related
1547****************************************/
1548
1549enum bt_ant_num {
1550 ANT_X2 = 0,
1551 ANT_X1 = 1,
1552};
1553
1554enum bt_co_type {
1555 BT_2WIRE = 0,
1556 BT_ISSC_3WIRE = 1,
1557 BT_ACCEL = 2,
1558 BT_CSR_BC4 = 3,
1559 BT_CSR_BC8 = 4,
1560 BT_RTL8756 = 5,
1561};
1562
1563enum bt_cur_state {
1564 BT_OFF = 0,
1565 BT_ON = 1,
1566};
1567
1568enum bt_service_type {
1569 BT_SCO = 0,
1570 BT_A2DP = 1,
1571 BT_HID = 2,
1572 BT_HID_IDLE = 3,
1573 BT_SCAN = 4,
1574 BT_IDLE = 5,
1575 BT_OTHER_ACTION = 6,
1576 BT_BUSY = 7,
1577 BT_OTHERBUSY = 8,
1578 BT_PAN = 9,
1579};
1580
1581enum bt_radio_shared {
1582 BT_RADIO_SHARED = 0,
1583 BT_RADIO_INDIVIDUAL = 1,
1584};
1585
1586struct bt_coexist_info {
1587
1588 /* EEPROM BT info. */
1589 u8 eeprom_bt_coexist;
1590 u8 eeprom_bt_type;
1591 u8 eeprom_bt_ant_num;
1592 u8 eeprom_bt_ant_isolation;
1593 u8 eeprom_bt_radio_shared;
1594
1595 u8 bt_coexistence;
1596 u8 bt_ant_num;
1597 u8 bt_coexist_type;
1598 u8 bt_state;
1599 u8 bt_cur_state; /* 0:on, 1:off */
1600 u8 bt_ant_isolation; /* 0:good, 1:bad */
1601 u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */
1602 u8 bt_service;
1603 u8 bt_radio_shared_type;
1604 u8 bt_rfreg_origin_1e;
1605 u8 bt_rfreg_origin_1f;
1606 u8 bt_rssi_state;
1607 u32 ratio_tx;
1608 u32 ratio_pri;
1609 u32 bt_edca_ul;
1610 u32 bt_edca_dl;
1611
1612 bool b_init_set;
1613 bool b_bt_busy_traffic;
1614 bool b_bt_traffic_mode_set;
1615 bool b_bt_non_traffic_mode_set;
1616
1617 bool b_fw_coexist_all_off;
1618 bool b_sw_coexist_all_off;
1619 u32 current_state;
1620 u32 previous_state;
1621 u8 bt_pre_rssi_state;
1622
1623 u8 b_reg_bt_iso;
1624 u8 b_reg_bt_sco;
1625
1626};
1627
1628
1277/**************************************** 1629/****************************************
1278 mem access macro define start 1630 mem access macro define start
1279 Call endian free function when 1631 Call endian free function when
@@ -1281,7 +1633,7 @@ struct rtl_priv {
1281 2. Before write integer to IO. 1633 2. Before write integer to IO.
1282 3. After read integer from IO. 1634 3. After read integer from IO.
1283****************************************/ 1635****************************************/
1284/* Convert little data endian to host */ 1636/* Convert little-endian data to host ordering */
1285#define EF1BYTE(_val) \ 1637#define EF1BYTE(_val) \
1286 ((u8)(_val)) 1638 ((u8)(_val))
1287#define EF2BYTE(_val) \ 1639#define EF2BYTE(_val) \
@@ -1289,27 +1641,21 @@ struct rtl_priv {
1289#define EF4BYTE(_val) \ 1641#define EF4BYTE(_val) \
1290 (le32_to_cpu(_val)) 1642 (le32_to_cpu(_val))
1291 1643
1292/* Read data from memory */ 1644/* Read le16 data from memory and convert to host ordering */
1293#define READEF1BYTE(_ptr) \
1294 EF1BYTE(*((u8 *)(_ptr)))
1295#define READEF2BYTE(_ptr) \ 1645#define READEF2BYTE(_ptr) \
1296 EF2BYTE(*((u16 *)(_ptr))) 1646 EF2BYTE(*((u16 *)(_ptr)))
1297#define READEF4BYTE(_ptr) \
1298 EF4BYTE(*((u32 *)(_ptr)))
1299 1647
1300/* Write data to memory */ 1648/* Write le16 data to memory in host ordering */
1301#define WRITEEF1BYTE(_ptr, _val) \
1302 (*((u8 *)(_ptr))) = EF1BYTE(_val)
1303#define WRITEEF2BYTE(_ptr, _val) \ 1649#define WRITEEF2BYTE(_ptr, _val) \
1304 (*((u16 *)(_ptr))) = EF2BYTE(_val) 1650 (*((u16 *)(_ptr))) = EF2BYTE(_val)
1305#define WRITEEF4BYTE(_ptr, _val) \ 1651
1306 (*((u32 *)(_ptr))) = EF4BYTE(_val) 1652/* Create a bit mask
1307 1653 * Examples:
1308/*Example: 1654 * BIT_LEN_MASK_32(0) => 0x00000000
1309BIT_LEN_MASK_32(0) => 0x00000000 1655 * BIT_LEN_MASK_32(1) => 0x00000001
1310BIT_LEN_MASK_32(1) => 0x00000001 1656 * BIT_LEN_MASK_32(2) => 0x00000003
1311BIT_LEN_MASK_32(2) => 0x00000003 1657 * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
1312BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/ 1658 */
1313#define BIT_LEN_MASK_32(__bitlen) \ 1659#define BIT_LEN_MASK_32(__bitlen) \
1314 (0xFFFFFFFF >> (32 - (__bitlen))) 1660 (0xFFFFFFFF >> (32 - (__bitlen)))
1315#define BIT_LEN_MASK_16(__bitlen) \ 1661#define BIT_LEN_MASK_16(__bitlen) \
@@ -1317,9 +1663,11 @@ BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
1317#define BIT_LEN_MASK_8(__bitlen) \ 1663#define BIT_LEN_MASK_8(__bitlen) \
1318 (0xFF >> (8 - (__bitlen))) 1664 (0xFF >> (8 - (__bitlen)))
1319 1665
1320/*Example: 1666/* Create an offset bit mask
1321BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003 1667 * Examples:
1322BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/ 1668 * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
1669 * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
1670 */
1323#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \ 1671#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
1324 (BIT_LEN_MASK_32(__bitlen) << (__bitoffset)) 1672 (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
1325#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \ 1673#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
@@ -1328,8 +1676,9 @@ BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
1328 (BIT_LEN_MASK_8(__bitlen) << (__bitoffset)) 1676 (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
1329 1677
1330/*Description: 1678/*Description:
1331Return 4-byte value in host byte ordering from 1679 * Return 4-byte value in host byte ordering from
13324-byte pointer in little-endian system.*/ 1680 * 4-byte pointer in little-endian system.
1681 */
1333#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \ 1682#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
1334 (EF4BYTE(*((u32 *)(__pstart)))) 1683 (EF4BYTE(*((u32 *)(__pstart))))
1335#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \ 1684#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
@@ -1337,28 +1686,10 @@ Return 4-byte value in host byte ordering from
1337#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ 1686#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
1338 (EF1BYTE(*((u8 *)(__pstart)))) 1687 (EF1BYTE(*((u8 *)(__pstart))))
1339 1688
1340/*Description: 1689/* Description:
1341Translate subfield (continuous bits in little-endian) of 4-byte 1690 * Mask subfield (continuous bits in little-endian) of 4-byte value
1342value to host byte ordering.*/ 1691 * and return the result in 4-byte value in host byte ordering.
1343#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ 1692 */
1344 ( \
1345 (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
1346 BIT_LEN_MASK_32(__bitlen) \
1347 )
1348#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
1349 ( \
1350 (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
1351 BIT_LEN_MASK_16(__bitlen) \
1352 )
1353#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
1354 ( \
1355 (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
1356 BIT_LEN_MASK_8(__bitlen) \
1357 )
1358
1359/*Description:
1360Mask subfield (continuous bits in little-endian) of 4-byte value
1361and return the result in 4-byte value in host byte ordering.*/
1362#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ 1693#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
1363 ( \ 1694 ( \
1364 LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \ 1695 LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
@@ -1375,20 +1706,9 @@ and return the result in 4-byte value in host byte ordering.*/
1375 (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \ 1706 (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
1376 ) 1707 )
1377 1708
1378/*Description: 1709/* Description:
1379Set subfield of little-endian 4-byte value to specified value. */ 1710 * Set subfield of little-endian 4-byte value to specified value.
1380#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ 1711 */
1381 *((u32 *)(__pstart)) = EF4BYTE \
1382 ( \
1383 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
1384 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
1385 );
1386#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
1387 *((u16 *)(__pstart)) = EF2BYTE \
1388 ( \
1389 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
1390 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
1391 );
1392#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \ 1712#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
1393 *((u8 *)(__pstart)) = EF1BYTE \ 1713 *((u8 *)(__pstart)) = EF1BYTE \
1394 ( \ 1714 ( \
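(Illustrative aside, C, user space: a minimal sketch of how the bit-mask helpers and SET_BITS_TO_LE_1BYTE retained above compose to update a subfield of a descriptor byte. The stdint types stand in for the kernel's u8 and the macro bodies are simplified copies of the definitions above; this snippet is not part of the patch.)

#include <stdint.h>
#include <stdio.h>

#define BIT_LEN_MASK_8(__bitlen)	(0xFF >> (8 - (__bitlen)))
#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
	(BIT_LEN_MASK_8(__bitlen) << (__bitoffset))

/* clear bits [__bitoffset, __bitoffset + __bitlen) and OR in the new value */
#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val)	\
	(*((uint8_t *)(__pstart)) =					\
		((*((uint8_t *)(__pstart)) &				\
		  ~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) |	\
		 ((((uint8_t)(__val)) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset))))

int main(void)
{
	uint8_t desc = 0xA5;

	SET_BITS_TO_LE_1BYTE(&desc, 2, 2, 0x3);	/* write 0b11 into bits 2..3 */
	printf("desc = 0x%02x\n", desc);	/* prints 0xad */
	return 0;
}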
@@ -1400,13 +1720,14 @@ Set subfield of little-endian 4-byte value to specified value. */
1400 mem access macro define end 1720 mem access macro define end
1401****************************************/ 1721****************************************/
1402 1722
1403#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC) 1723#define byte(x, n) ((x >> (8 * n)) & 0xff)
1724
1404#define RTL_WATCH_DOG_TIME 2000 1725#define RTL_WATCH_DOG_TIME 2000
1405#define MSECS(t) msecs_to_jiffies(t) 1726#define MSECS(t) msecs_to_jiffies(t)
1406#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS) 1727#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS)
1407#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE) 1728#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
1408#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) 1729#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
1409#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA) 1730#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
1410#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 1731#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
1411#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 1732#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
1412#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 1733#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
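(Illustrative aside, C, user space: a standalone sketch of the byte() and SEQ_TO_SN()/SN_TO_SEQ() helpers above. IEEE80211_SCTL_SEQ is re-declared here with its mac80211 value, 0xFFF0, only so the example builds outside the kernel; this snippet is not part of the patch.)

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_SCTL_SEQ	0xFFF0

#define byte(x, n)	((x >> (8 * n)) & 0xff)
#define SEQ_TO_SN(seq)	(((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn)	(((ssn) << 4) & IEEE80211_SCTL_SEQ)

int main(void)
{
	uint32_t reg = 0x11223344;
	uint16_t seq_ctl = 0x1234;	/* fragment 4, sequence number 0x123 */

	/* pick individual bytes out of a 32-bit register value */
	printf("byte 0 = 0x%02x, byte 3 = 0x%02x\n",
	       (unsigned)byte(reg, 0), (unsigned)byte(reg, 3));	/* 0x44, 0x11 */

	/* strip the fragment field, then rebuild the sequence field */
	printf("sn = 0x%03x, seq field = 0x%04x\n",
	       (unsigned)SEQ_TO_SN(seq_ctl),
	       (unsigned)SN_TO_SEQ(SEQ_TO_SN(seq_ctl)));	/* 0x123, 0x1230 */
	return 0;
}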
@@ -1420,6 +1741,8 @@ Set subfield of little-endian 4-byte value to specified value. */
1420#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */ 1741#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
1421/*Always enable ASPM and Clock Req in initialization.*/ 1742/*Always enable ASPM and Clock Req in initialization.*/
1422#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) 1743#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
1744/* no matter RFOFF or SLEEP we set PS_ASPM_LEVL*/
1745#define RT_PS_LEVEL_ASPM BIT(7)
1423/*When LPS is on, disable 2R if no packet is received or transmittd.*/ 1746/*When LPS is on, disable 2R if no packet is received or transmittd.*/
1424#define RT_RF_LPS_DISALBE_2R BIT(30) 1747#define RT_RF_LPS_DISALBE_2R BIT(30)
1425#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */ 1748#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
@@ -1433,15 +1756,6 @@ Set subfield of little-endian 4-byte value to specified value. */
1433#define container_of_dwork_rtl(x, y, z) \ 1756#define container_of_dwork_rtl(x, y, z) \
1434 container_of(container_of(x, struct delayed_work, work), y, z) 1757 container_of(container_of(x, struct delayed_work, work), y, z)
1435 1758
1436#define FILL_OCTET_STRING(_os, _octet, _len) \
1437 (_os).octet = (u8 *)(_octet); \
1438 (_os).length = (_len);
1439
1440#define CP_MACADDR(des, src) \
1441 ((des)[0] = (src)[0], (des)[1] = (src)[1],\
1442 (des)[2] = (src)[2], (des)[3] = (src)[3],\
1443 (des)[4] = (src)[4], (des)[5] = (src)[5])
1444
1445static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) 1759static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
1446{ 1760{
1447 return rtlpriv->io.read8_sync(rtlpriv, addr); 1761 return rtlpriv->io.read8_sync(rtlpriv, addr);
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index 64a0214cfb2..ef8370edace 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -776,6 +776,31 @@ out:
776 return ret; 776 return ret;
777} 777}
778 778
779int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
780 u8 depth, enum wl1251_acx_low_rssi_type type)
781{
782 struct acx_low_rssi *rssi;
783 int ret;
784
785 wl1251_debug(DEBUG_ACX, "acx low rssi");
786
787 rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
788 if (!rssi)
789 return -ENOMEM;
790
791 rssi->threshold = threshold;
792 rssi->weight = weight;
793 rssi->depth = depth;
794 rssi->type = type;
795
796 ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
797 if (ret < 0)
798 wl1251_warning("failed to set low rssi threshold: %d", ret);
799
800 kfree(rssi);
801 return ret;
802}
803
779int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble) 804int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
780{ 805{
781 struct acx_preamble *acx; 806 struct acx_preamble *acx;
@@ -978,6 +1003,34 @@ out:
978 return ret; 1003 return ret;
979} 1004}
980 1005
1006int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
1007 u8 max_consecutive)
1008{
1009 struct wl1251_acx_bet_enable *acx;
1010 int ret;
1011
1012 wl1251_debug(DEBUG_ACX, "acx bet enable");
1013
1014 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1015 if (!acx) {
1016 ret = -ENOMEM;
1017 goto out;
1018 }
1019
1020 acx->enable = mode;
1021 acx->max_consecutive = max_consecutive;
1022
1023 ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
1024 if (ret < 0) {
1025 wl1251_warning("wl1251 acx bet enable failed: %d", ret);
1026 goto out;
1027 }
1028
1029out:
1030 kfree(acx);
1031 return ret;
1032}
1033
981int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, 1034int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
982 u8 aifs, u16 txop) 1035 u8 aifs, u16 txop)
983{ 1036{
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index efcc3aaca14..c2ba100f9b1 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -399,6 +399,49 @@ struct acx_rts_threshold {
399 u8 pad[2]; 399 u8 pad[2];
400} __packed; 400} __packed;
401 401
402enum wl1251_acx_low_rssi_type {
403 /*
404 * The event is a "Level" indication which keeps triggering
405 * as long as the average RSSI is below the threshold.
406 */
407 WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0,
408
409 /*
410 * The event is an "Edge" indication which triggers
411 * only when the RSSI threshold is crossed from above.
412 */
413 WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1,
414};
415
416struct acx_low_rssi {
417 struct acx_header header;
418
419 /*
420 * The threshold (in dBm) below (or above after low rssi
421 * indication) which the firmware generates an interrupt to the
422 * host. This parameter is signed.
423 */
424 s8 threshold;
425
426 /*
427 * The weight of the current RSSI sample, before adding the new
428 * sample, that is used to calculate the average RSSI.
429 */
430 u8 weight;
431
432 /*
433 * The number of Beacons/Probe response frames that will be
434 * received before issuing the Low or Regained RSSI event.
435 */
436 u8 depth;
437
438 /*
439 * Configures how the Low RSSI Event is triggered. Refer to
440 * enum wl1251_acx_low_rssi_type for more.
441 */
442 u8 type;
443} __packed;
444
402struct acx_beacon_filter_option { 445struct acx_beacon_filter_option {
403 struct acx_header header; 446 struct acx_header header;
404 447
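(Illustrative aside, C, driver context: a hedged sketch of filling the acx_low_rssi parameters above through the wl1251_acx_low_rssi() helper added by this patch. It assumes a valid struct wl1251 *wl; the threshold, weight and depth values are examples rather than driver defaults, and the wrapper name wl1251_example_enable_low_rssi is hypothetical.)

static int wl1251_example_enable_low_rssi(struct wl1251 *wl)
{
	/* edge-type trigger: fire once when the RSSI averaged over 10
	 * beacons with weight 10 drops below -70 dBm (illustrative values) */
	return wl1251_acx_low_rssi(wl, -70, 10, 10,
				   WL1251_ACX_LOW_RSSI_TYPE_EDGE);
}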
@@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim {
1164 u8 padding; 1207 u8 padding;
1165} __packed; 1208} __packed;
1166 1209
1210enum wl1251_acx_bet_mode {
1211 WL1251_ACX_BET_DISABLE = 0,
1212 WL1251_ACX_BET_ENABLE = 1,
1213};
1214
1215struct wl1251_acx_bet_enable {
1216 struct acx_header header;
1217
1218 /*
1219 * Specifies if beacon early termination procedure is enabled or
1220 * disabled, see enum wl1251_acx_bet_mode.
1221 */
1222 u8 enable;
1223
1224 /*
1225 * Specifies the maximum number of consecutive beacons that may be
1226 * early terminated. After this number is reached at least one full
1227 * beacon must be correctly received in FW before beacon ET
1228 * resumes. Range 0 - 255.
1229 */
1230 u8 max_consecutive;
1231
1232 u8 padding[2];
1233} __packed;
1234
1167struct wl1251_acx_ac_cfg { 1235struct wl1251_acx_ac_cfg {
1168 struct acx_header header; 1236 struct acx_header header;
1169 1237
@@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl);
1393int wl1251_acx_bcn_dtim_options(struct wl1251 *wl); 1461int wl1251_acx_bcn_dtim_options(struct wl1251 *wl);
1394int wl1251_acx_aid(struct wl1251 *wl, u16 aid); 1462int wl1251_acx_aid(struct wl1251 *wl, u16 aid);
1395int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask); 1463int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask);
1464int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
1465 u8 depth, enum wl1251_acx_low_rssi_type type);
1396int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble); 1466int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble);
1397int wl1251_acx_cts_protect(struct wl1251 *wl, 1467int wl1251_acx_cts_protect(struct wl1251 *wl,
1398 enum acx_ctsprotect_type ctsprotect); 1468 enum acx_ctsprotect_type ctsprotect);
@@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
1401int wl1251_acx_rate_policies(struct wl1251 *wl); 1471int wl1251_acx_rate_policies(struct wl1251 *wl);
1402int wl1251_acx_mem_cfg(struct wl1251 *wl); 1472int wl1251_acx_mem_cfg(struct wl1251 *wl);
1403int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); 1473int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
1474int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
1475 u8 max_consecutive);
1404int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, 1476int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
1405 u8 aifs, u16 txop); 1477 u8 aifs, u16 txop);
1406int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue, 1478int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/wl1251/event.c
index 712372e50a8..dfc4579acb0 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/wl1251/event.c
@@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
90 } 90 }
91 } 91 }
92 92
93 if (wl->vif && wl->rssi_thold) {
94 if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
95 wl1251_debug(DEBUG_EVENT,
96 "ROAMING_TRIGGER_LOW_RSSI_EVENT");
97 ieee80211_cqm_rssi_notify(wl->vif,
98 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
99 GFP_KERNEL);
100 }
101
102 if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
103 wl1251_debug(DEBUG_EVENT,
104 "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
105 ieee80211_cqm_rssi_notify(wl->vif,
106 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
107 GFP_KERNEL);
108 }
109 }
110
93 return 0; 111 return 0;
94} 112}
95 113
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 40372bac948..5a1c13878ea 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -502,6 +502,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
502 wl->psm = 0; 502 wl->psm = 0;
503 wl->tx_queue_stopped = false; 503 wl->tx_queue_stopped = false;
504 wl->power_level = WL1251_DEFAULT_POWER_LEVEL; 504 wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
505 wl->rssi_thold = 0;
505 wl->channel = WL1251_DEFAULT_CHANNEL; 506 wl->channel = WL1251_DEFAULT_CHANNEL;
506 507
507 wl1251_debugfs_reset(wl); 508 wl1251_debugfs_reset(wl);
@@ -959,6 +960,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
959 if (ret < 0) 960 if (ret < 0)
960 goto out; 961 goto out;
961 962
963 if (changed & BSS_CHANGED_CQM) {
964 ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
965 WL1251_DEFAULT_LOW_RSSI_WEIGHT,
966 WL1251_DEFAULT_LOW_RSSI_DEPTH,
967 WL1251_ACX_LOW_RSSI_TYPE_EDGE);
968 if (ret < 0)
969 goto out;
970 wl->rssi_thold = bss_conf->cqm_rssi_thold;
971 }
972
962 if (changed & BSS_CHANGED_BSSID) { 973 if (changed & BSS_CHANGED_BSSID) {
963 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 974 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
964 975
@@ -1313,9 +1324,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1313 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1324 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1314 IEEE80211_HW_SUPPORTS_PS | 1325 IEEE80211_HW_SUPPORTS_PS |
1315 IEEE80211_HW_BEACON_FILTER | 1326 IEEE80211_HW_BEACON_FILTER |
1316 IEEE80211_HW_SUPPORTS_UAPSD; 1327 IEEE80211_HW_SUPPORTS_UAPSD |
1328 IEEE80211_HW_SUPPORTS_CQM_RSSI;
1317 1329
1318 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1330 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1331 BIT(NL80211_IFTYPE_ADHOC);
1319 wl->hw->wiphy->max_scan_ssids = 1; 1332 wl->hw->wiphy->max_scan_ssids = 1;
1320 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; 1333 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
1321 1334
@@ -1377,6 +1390,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1377 wl->psm_requested = false; 1390 wl->psm_requested = false;
1378 wl->tx_queue_stopped = false; 1391 wl->tx_queue_stopped = false;
1379 wl->power_level = WL1251_DEFAULT_POWER_LEVEL; 1392 wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
1393 wl->rssi_thold = 0;
1380 wl->beacon_int = WL1251_DEFAULT_BEACON_INT; 1394 wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
1381 wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; 1395 wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
1382 wl->vif = NULL; 1396 wl->vif = NULL;
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c
index 5ed47c8373d..9ba23ede51b 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -153,6 +153,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
153 if (ret < 0) 153 if (ret < 0)
154 return ret; 154 return ret;
155 155
156 ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
157 WL1251_DEFAULT_BET_CONSECUTIVE);
158 if (ret < 0)
159 return ret;
160
156 ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); 161 ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
157 if (ret < 0) 162 if (ret < 0)
158 return ret; 163 return ret;
@@ -170,6 +175,12 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
170 if (ret < 0) 175 if (ret < 0)
171 return ret; 176 return ret;
172 177
178 /* disable BET */
179 ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
180 WL1251_DEFAULT_BET_CONSECUTIVE);
181 if (ret < 0)
182 return ret;
183
173 /* disable beacon filtering */ 184 /* disable beacon filtering */
174 ret = wl1251_acx_beacon_filter_opt(wl, false); 185 ret = wl1251_acx_beacon_filter_opt(wl, false);
175 if (ret < 0) 186 if (ret < 0)
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c
index efa53607d5c..b659e15c78d 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -78,7 +78,8 @@ static void wl1251_rx_status(struct wl1251 *wl,
78 */ 78 */
79 wl->noise = desc->rssi - desc->snr / 2; 79 wl->noise = desc->rssi - desc->snr / 2;
80 80
81 status->freq = ieee80211_channel_to_frequency(desc->channel); 81 status->freq = ieee80211_channel_to_frequency(desc->channel,
82 status->band);
82 83
83 status->flag |= RX_FLAG_TSFT; 84 status->flag |= RX_FLAG_TSFT;
84 85
@@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl,
95 if (unlikely(!(desc->flags & RX_DESC_VALID_FCS))) 96 if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
96 status->flag |= RX_FLAG_FAILED_FCS_CRC; 97 status->flag |= RX_FLAG_FAILED_FCS_CRC;
97 98
99 switch (desc->rate) {
100	/* skip 1 and 12 Mbps because they have the same value 0x0a */
101 case RATE_2MBPS:
102 status->rate_idx = 1;
103 break;
104 case RATE_5_5MBPS:
105 status->rate_idx = 2;
106 break;
107 case RATE_11MBPS:
108 status->rate_idx = 3;
109 break;
110 case RATE_6MBPS:
111 status->rate_idx = 4;
112 break;
113 case RATE_9MBPS:
114 status->rate_idx = 5;
115 break;
116 case RATE_18MBPS:
117 status->rate_idx = 7;
118 break;
119 case RATE_24MBPS:
120 status->rate_idx = 8;
121 break;
122 case RATE_36MBPS:
123 status->rate_idx = 9;
124 break;
125 case RATE_48MBPS:
126 status->rate_idx = 10;
127 break;
128 case RATE_54MBPS:
129 status->rate_idx = 11;
130 break;
131 }
132
133 /* for 1 and 12 Mbps we have to check the modulation */
134 if (desc->rate == RATE_1MBPS) {
135 if (!(desc->mod_pre & OFDM_RATE_BIT))
136 /* CCK -> RATE_1MBPS */
137 status->rate_idx = 0;
138 else
139 /* OFDM -> RATE_12MBPS */
140 status->rate_idx = 6;
141 }
98 142
99 /* FIXME: set status->rate_idx */ 143 if (desc->mod_pre & SHORT_PREAMBLE_BIT)
144 status->flag |= RX_FLAG_SHORTPRE;
100} 145}
101 146
102static void wl1251_rx_body(struct wl1251 *wl, 147static void wl1251_rx_body(struct wl1251 *wl,
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/wl1251/tx.c
index 554b4f9a3d3..28121c590a2 100644
--- a/drivers/net/wireless/wl1251/tx.c
+++ b/drivers/net/wireless/wl1251/tx.c
@@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
213 wl1251_debug(DEBUG_TX, "skb offset %d", offset); 213 wl1251_debug(DEBUG_TX, "skb offset %d", offset);
214 214
215 /* check whether the current skb can be used */ 215 /* check whether the current skb can be used */
216 if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { 216 if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
217 unsigned char *src = skb->data; 217 struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
218 GFP_KERNEL);
219
220 if (unlikely(newskb == NULL)) {
221 wl1251_error("Can't allocate skb!");
222 return -EINVAL;
223 }
218 224
219 /* align the buffer on a 4-byte boundary */ 225 tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
226
227 dev_kfree_skb_any(skb);
228 wl->tx_frames[tx_hdr->id] = skb = newskb;
229
230 offset = (4 - (long)skb->data) & 0x03;
231 wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
232 }
233
234 /* align the buffer on a 4-byte boundary */
235 if (offset) {
236 unsigned char *src = skb->data;
220 skb_reserve(skb, offset); 237 skb_reserve(skb, offset);
221 memmove(skb->data, src, skb->len); 238 memmove(skb->data, src, skb->len);
222 tx_hdr = (struct tx_double_buffer_desc *) skb->data; 239 tx_hdr = (struct tx_double_buffer_desc *) skb->data;
223 } else {
224 wl1251_info("No handler, fixme!");
225 return -EINVAL;
226 } 240 }
227 } 241 }
228 242
@@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
368{ 382{
369 struct ieee80211_tx_info *info; 383 struct ieee80211_tx_info *info;
370 struct sk_buff *skb; 384 struct sk_buff *skb;
371 int hdrlen, ret; 385 int hdrlen;
372 u8 *frame; 386 u8 *frame;
373 387
374 skb = wl->tx_frames[result->id]; 388 skb = wl->tx_frames[result->id];
@@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
407 ieee80211_tx_status(wl->hw, skb); 421 ieee80211_tx_status(wl->hw, skb);
408 422
409 wl->tx_frames[result->id] = NULL; 423 wl->tx_frames[result->id] = NULL;
410
411 if (wl->tx_queue_stopped) {
412 wl1251_debug(DEBUG_TX, "cb: queue was stopped");
413
414 skb = skb_dequeue(&wl->tx_queue);
415
416 /* The skb can be NULL because tx_work might have been
417 scheduled before the queue was stopped making the
418 queue empty */
419
420 if (skb) {
421 ret = wl1251_tx_frame(wl, skb);
422 if (ret == -EBUSY) {
423 /* firmware buffer is still full */
424 wl1251_debug(DEBUG_TX, "cb: fw buffer "
425 "still full");
426 skb_queue_head(&wl->tx_queue, skb);
427 return;
428 } else if (ret < 0) {
429 dev_kfree_skb(skb);
430 return;
431 }
432 }
433
434 wl1251_debug(DEBUG_TX, "cb: waking queues");
435 ieee80211_wake_queues(wl->hw);
436 wl->tx_queue_stopped = false;
437 }
438} 424}
439 425
440/* Called upon reception of a TX complete interrupt */ 426/* Called upon reception of a TX complete interrupt */
441void wl1251_tx_complete(struct wl1251 *wl) 427void wl1251_tx_complete(struct wl1251 *wl)
442{ 428{
443 int i, result_index, num_complete = 0; 429 int i, result_index, num_complete = 0, queue_len;
444 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; 430 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
445 unsigned long flags; 431 unsigned long flags;
446 432
@@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl)
471 } 457 }
472 } 458 }
473 459
474 if (wl->tx_queue_stopped 460 queue_len = skb_queue_len(&wl->tx_queue);
475 &&
476 skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){
477 461
478 /* firmware buffer has space, restart queues */ 462 if ((num_complete > 0) && (queue_len > 0)) {
463 /* firmware buffer has space, reschedule tx_work */
464 wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
465 ieee80211_queue_work(wl->hw, &wl->tx_work);
466 }
467
468 if (wl->tx_queue_stopped &&
469 queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
470 /* tx_queue has space, restart queues */
479 wl1251_debug(DEBUG_TX, "tx_complete: waking queues"); 471 wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
480 spin_lock_irqsave(&wl->wl_lock, flags); 472 spin_lock_irqsave(&wl->wl_lock, flags);
481 ieee80211_wake_queues(wl->hw); 473 ieee80211_wake_queues(wl->hw);
482 wl->tx_queue_stopped = false; 474 wl->tx_queue_stopped = false;
483 spin_unlock_irqrestore(&wl->wl_lock, flags); 475 spin_unlock_irqrestore(&wl->wl_lock, flags);
484 ieee80211_queue_work(wl->hw, &wl->tx_work);
485
486 } 476 }
487 477
488 /* Every completed frame needs to be acknowledged */ 478 /* Every completed frame needs to be acknowledged */
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index c0ce2c8b43b..bb23cd522b2 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -370,6 +370,8 @@ struct wl1251 {
370 /* in dBm */ 370 /* in dBm */
371 int power_level; 371 int power_level;
372 372
373 int rssi_thold;
374
373 struct wl1251_stats stats; 375 struct wl1251_stats stats;
374 struct wl1251_debugfs debugfs; 376 struct wl1251_debugfs debugfs;
375 377
@@ -410,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
410 412
411#define WL1251_DEFAULT_CHANNEL 0 413#define WL1251_DEFAULT_CHANNEL 0
412 414
415#define WL1251_DEFAULT_BET_CONSECUTIVE 10
416
413#define CHIP_ID_1251_PG10 (0x7010101) 417#define CHIP_ID_1251_PG10 (0x7010101)
414#define CHIP_ID_1251_PG11 (0x7020101) 418#define CHIP_ID_1251_PG11 (0x7020101)
415#define CHIP_ID_1251_PG12 (0x7030101) 419#define CHIP_ID_1251_PG12 (0x7030101)
@@ -431,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
431#define WL1251_PART_WORK_REG_START REGISTERS_BASE 435#define WL1251_PART_WORK_REG_START REGISTERS_BASE
432#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE 436#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE
433 437
438#define WL1251_DEFAULT_LOW_RSSI_WEIGHT 10
439#define WL1251_DEFAULT_LOW_RSSI_DEPTH 10
440
434#endif 441#endif
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 0e65bce457d..692ebff38fc 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -54,7 +54,7 @@ config WL12XX_SDIO
54 54
55config WL12XX_SDIO_TEST 55config WL12XX_SDIO_TEST
56 tristate "TI wl12xx SDIO testing support" 56 tristate "TI wl12xx SDIO testing support"
57 depends on WL12XX && MMC 57 depends on WL12XX && MMC && WL12XX_SDIO
58 default n 58 default n
59 ---help--- 59 ---help---
60 This module adds support for the SDIO bus testing with the 60 This module adds support for the SDIO bus testing with the
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index cc4068d2b4a..33840d95d17 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
751 return 0; 751 return 0;
752} 752}
753 753
754int wl1271_acx_rate_policies(struct wl1271 *wl) 754int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
755{ 755{
756 struct acx_rate_policy *acx; 756 struct acx_sta_rate_policy *acx;
757 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf; 757 struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
758 int idx = 0; 758 int idx = 0;
759 int ret = 0; 759 int ret = 0;
760 760
@@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
783 783
784 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT); 784 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
785 785
786 wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
787 acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
788 acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
789
786 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 790 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
787 if (ret < 0) { 791 if (ret < 0) {
788 wl1271_warning("Setting of rate policies failed: %d", ret); 792 wl1271_warning("Setting of rate policies failed: %d", ret);
@@ -794,6 +798,38 @@ out:
794 return ret; 798 return ret;
795} 799}
796 800
801int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
802 u8 idx)
803{
804 struct acx_ap_rate_policy *acx;
805 int ret = 0;
806
807 wl1271_debug(DEBUG_ACX, "acx ap rate policy");
808
809 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
810 if (!acx) {
811 ret = -ENOMEM;
812 goto out;
813 }
814
815 acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
816 acx->rate_policy.short_retry_limit = c->short_retry_limit;
817 acx->rate_policy.long_retry_limit = c->long_retry_limit;
818 acx->rate_policy.aflags = c->aflags;
819
820 acx->rate_policy_idx = cpu_to_le32(idx);
821
822 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
823 if (ret < 0) {
824 wl1271_warning("Setting of ap rate policy failed: %d", ret);
825 goto out;
826 }
827
828out:
829 kfree(acx);
830 return ret;
831}
832
797int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, 833int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
798 u8 aifsn, u16 txop) 834 u8 aifsn, u16 txop)
799{ 835{
@@ -915,9 +951,9 @@ out:
915 return ret; 951 return ret;
916} 952}
917 953
918int wl1271_acx_mem_cfg(struct wl1271 *wl) 954int wl1271_acx_ap_mem_cfg(struct wl1271 *wl)
919{ 955{
920 struct wl1271_acx_config_memory *mem_conf; 956 struct wl1271_acx_ap_config_memory *mem_conf;
921 int ret; 957 int ret;
922 958
923 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); 959 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
@@ -929,10 +965,10 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
929 } 965 }
930 966
931 /* memory config */ 967 /* memory config */
932 mem_conf->num_stations = DEFAULT_NUM_STATIONS; 968 mem_conf->num_stations = wl->conf.mem.num_stations;
933 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; 969 mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
934 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; 970 mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
935 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; 971 mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
936 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); 972 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
937 973
938 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 974 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
@@ -947,13 +983,45 @@ out:
947 return ret; 983 return ret;
948} 984}
949 985
950int wl1271_acx_init_mem_config(struct wl1271 *wl) 986int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
951{ 987{
988 struct wl1271_acx_sta_config_memory *mem_conf;
952 int ret; 989 int ret;
953 990
954 ret = wl1271_acx_mem_cfg(wl); 991 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
955 if (ret < 0) 992
956 return ret; 993 mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
994 if (!mem_conf) {
995 ret = -ENOMEM;
996 goto out;
997 }
998
999 /* memory config */
1000 mem_conf->num_stations = wl->conf.mem.num_stations;
1001 mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
1002 mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
1003 mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
1004 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
1005 mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory;
1006 mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks;
1007 mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks;
1008 mem_conf->tx_min = wl->conf.mem.tx_min;
1009
1010 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
1011 sizeof(*mem_conf));
1012 if (ret < 0) {
1013 wl1271_warning("wl1271 mem config failed: %d", ret);
1014 goto out;
1015 }
1016
1017out:
1018 kfree(mem_conf);
1019 return ret;
1020}
1021
1022int wl1271_acx_init_mem_config(struct wl1271 *wl)
1023{
1024 int ret;
957 1025
958 1026 	wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
959 1027 				     GFP_KERNEL);
@@ -1233,6 +1301,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1233 1301 	struct wl1271_acx_ht_capabilities *acx;
1234 1302 	u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1235 1303 	int ret = 0;
1304 u32 ht_capabilites = 0;
1236 1305
1237 1306 	wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
1238 1307
@@ -1244,16 +1313,16 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1244 1313
1245 1314 	/* Allow HT Operation ? */
1246 1315 	if (allow_ht_operation) {
1247 		acx->ht_capabilites =
1316 		ht_capabilites =
1248 1317 			WL1271_ACX_FW_CAP_HT_OPERATION;
1249 1318 		if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
1250 			acx->ht_capabilites |=
1319 			ht_capabilites |=
1251 1320 				WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
1252 1321 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
1253 			acx->ht_capabilites |=
1322 			ht_capabilites |=
1254 1323 				WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
1255 1324 		if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
1256 			acx->ht_capabilites |=
1325 			ht_capabilites |=
1257 1326 				WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
1258 1327
1259 1328 	/* get data from A-MPDU parameters field */
@@ -1261,10 +1330,10 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1261 1330 	acx->ampdu_min_spacing = ht_cap->ampdu_density;
1262 1331
1263 1332 	memcpy(acx->mac_address, mac_address, ETH_ALEN);
1264 } else { /* HT operations are not allowed */
1265 acx->ht_capabilites = 0;
1266 1333 	}
1267 1334
1335 	acx->ht_capabilites = cpu_to_le32(ht_capabilites);
1336
1268 1337 	ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
1269 1338 	if (ret < 0) {
1270 1339 		wl1271_warning("acx ht capabilities setting failed: %d", ret);
@@ -1309,6 +1378,91 @@ out:
1309 1378 	return ret;
1310 1379 }
1311 1380
1381/* Configure BA session initiator/receiver parameters setting in the FW. */
1382int wl1271_acx_set_ba_session(struct wl1271 *wl,
1383 enum ieee80211_back_parties direction,
1384 u8 tid_index, u8 policy)
1385{
1386 struct wl1271_acx_ba_session_policy *acx;
1387 int ret;
1388
1389 wl1271_debug(DEBUG_ACX, "acx ba session setting");
1390
1391 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1392 if (!acx) {
1393 ret = -ENOMEM;
1394 goto out;
1395 }
1396
1397 /* ANY role */
1398 acx->role_id = 0xff;
1399 acx->tid = tid_index;
1400 acx->enable = policy;
1401 acx->ba_direction = direction;
1402
1403 switch (direction) {
1404 case WLAN_BACK_INITIATOR:
1405 acx->win_size = wl->conf.ht.tx_ba_win_size;
1406 acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
1407 break;
1408 case WLAN_BACK_RECIPIENT:
1409 acx->win_size = RX_BA_WIN_SIZE;
1410 acx->inactivity_timeout = 0;
1411 break;
1412 default:
1413 wl1271_error("Incorrect acx command id=%x\n", direction);
1414 ret = -EINVAL;
1415 goto out;
1416 }
1417
1418 ret = wl1271_cmd_configure(wl,
1419 ACX_BA_SESSION_POLICY_CFG,
1420 acx,
1421 sizeof(*acx));
1422 if (ret < 0) {
1423 wl1271_warning("acx ba session setting failed: %d", ret);
1424 goto out;
1425 }
1426
1427out:
1428 kfree(acx);
1429 return ret;
1430}
1431
1432/* setup BA session receiver setting in the FW. */
1433int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
1434 bool enable)
1435{
1436 struct wl1271_acx_ba_receiver_setup *acx;
1437 int ret;
1438
1439 wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
1440
1441 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1442 if (!acx) {
1443 ret = -ENOMEM;
1444 goto out;
1445 }
1446
1447 /* Single link for now */
1448 acx->link_id = 1;
1449 acx->tid = tid_index;
1450 acx->enable = enable;
1451 acx->win_size = 0;
1452 acx->ssn = ssn;
1453
1454 ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
1455 sizeof(*acx));
1456 if (ret < 0) {
1457 wl1271_warning("acx ba receiver session failed: %d", ret);
1458 goto out;
1459 }
1460
1461out:
1462 kfree(acx);
1463 return ret;
1464}
1465
1312 1466 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
1313 1467 {
1314 1468 	struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1334,3 +1488,57 @@ out:
1334 1488 	kfree(tsf_info);
1335 1489 	return ret;
1336 1490 }
1491
1492int wl1271_acx_max_tx_retry(struct wl1271 *wl)
1493{
1494 struct wl1271_acx_max_tx_retry *acx = NULL;
1495 int ret;
1496
1497 wl1271_debug(DEBUG_ACX, "acx max tx retry");
1498
1499 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1500 if (!acx)
1501 return -ENOMEM;
1502
1503 acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
1504
1505 ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
1506 if (ret < 0) {
1507 wl1271_warning("acx max tx retry failed: %d", ret);
1508 goto out;
1509 }
1510
1511out:
1512 kfree(acx);
1513 return ret;
1514}
1515
1516int wl1271_acx_config_ps(struct wl1271 *wl)
1517{
1518 struct wl1271_acx_config_ps *config_ps;
1519 int ret;
1520
1521 wl1271_debug(DEBUG_ACX, "acx config ps");
1522
1523 config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
1524 if (!config_ps) {
1525 ret = -ENOMEM;
1526 goto out;
1527 }
1528
1529 config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
1530 config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
1531 config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
1532
1533 ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
1534 sizeof(*config_ps));
1535
1536 if (ret < 0) {
1537 wl1271_warning("acx config ps failed: %d", ret);
1538 goto out;
1539 }
1540
1541out:
1542 kfree(config_ps);
1543 return ret;
1544}
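A minimal sketch (not part of the patch) of how the new ACX_BA_SESSION_RX_SETUP helper above could be driven from mac80211's ampdu_action callback; the callback name, signature details and locking are assumptions for this period of mac80211, and the real hookup lives in main.c, outside this hunk. ELP wakeup and per-TID bookkeeping are omitted.

static int wl1271_op_ampdu_action_sketch(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 enum ieee80211_ampdu_mlme_action action,
					 struct ieee80211_sta *sta,
					 u16 tid, u16 *ssn)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	mutex_lock(&wl->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		/* enable the RX BA session for this TID in the FW */
		ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn, true);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		/* tear the RX BA session down again */
		ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
		break;
	default:
		/* TX aggregation setup is not covered by this sketch */
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&wl->mutex);
	return ret;
}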
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 7bd8e4db4a7..4e301de916b 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -133,7 +133,6 @@ enum {
133 133
134 134 #define DEFAULT_UCAST_PRIORITY 0
135 135 #define DEFAULT_RX_Q_PRIORITY 0
136 #define DEFAULT_NUM_STATIONS 1
137 136 #define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 15 high */
138 137 #define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */
139 138 #define TRACE_BUFFER_MAX_SIZE 256
@@ -747,13 +746,23 @@ struct acx_rate_class {
747 746 #define ACX_TX_BASIC_RATE 0
748 747 #define ACX_TX_AP_FULL_RATE 1
749 748 #define ACX_TX_RATE_POLICY_CNT 2
750 struct acx_rate_policy {
749 struct acx_sta_rate_policy {
751 750 	struct acx_header header;
752 751
753 752 	__le32 rate_class_cnt;
754 753 	struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
755 754 } __packed;
756 755
756
757#define ACX_TX_AP_MODE_MGMT_RATE 4
758#define ACX_TX_AP_MODE_BCST_RATE 5
759struct acx_ap_rate_policy {
760 struct acx_header header;
761
762 __le32 rate_policy_idx;
763 struct acx_rate_class rate_policy;
764} __packed;
765
757 766 struct acx_ac_cfg {
758 767 	struct acx_header header;
759 768 	u8 ac;
@@ -787,12 +796,19 @@ struct acx_tx_config_options {
787 796 	__le16 tx_compl_threshold; /* number of packets */
788 797 } __packed;
789 798
790#define ACX_RX_MEM_BLOCKS 70
791#define ACX_TX_MIN_MEM_BLOCKS 40
792 799 #define ACX_TX_DESCRIPTORS 32
793 #define ACX_NUM_SSID_PROFILES 1
794 800
795 struct wl1271_acx_config_memory {
801 struct wl1271_acx_ap_config_memory {
802 struct acx_header header;
803
804 u8 rx_mem_block_num;
805 u8 tx_min_mem_block_num;
806 u8 num_stations;
807 u8 num_ssid_profiles;
808 __le32 total_tx_descriptors;
809} __packed;
810
811struct wl1271_acx_sta_config_memory {
796 812 	struct acx_header header;
797 813
798 814 	u8 rx_mem_block_num;
@@ -800,6 +816,10 @@ struct wl1271_acx_config_memory {
800 816 	u8 num_stations;
801 817 	u8 num_ssid_profiles;
802 818 	__le32 total_tx_descriptors;
819 u8 dyn_mem_enable;
820 u8 tx_free_req;
821 u8 rx_free_req;
822 u8 tx_min;
803 823 } __packed;
804 824
805 825 struct wl1271_acx_mem_map {
@@ -1051,6 +1071,59 @@ struct wl1271_acx_ht_information {
1051 1071 	u8 padding[3];
1052 1072 } __packed;
1053 1073
1074#define RX_BA_WIN_SIZE 8
1075
1076struct wl1271_acx_ba_session_policy {
1077 struct acx_header header;
1078 /*
1079 * Specifies role Id, Range 0-7, 0xFF means ANY role.
1080 * Future use. For now this field is irrelevant
1081 */
1082 u8 role_id;
1083 /*
1084 * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
1085 * Not applicable if Role Id is set to ANY.
1086 */
1087 u8 link_id;
1088
1089 u8 tid;
1090
1091 u8 enable;
1092
1093 /* Window size in number of packets */
1094 u16 win_size;
1095
1096 /*
1097 * As initiator: inactivity timeout in time units (TU) of 1024us.
1098 * As receiver: reserved.
1099 */
1100 u16 inactivity_timeout;
1101
1102 /* Initiator = 1/Receiver = 0 */
1103 u8 ba_direction;
1104
1105 u8 padding[3];
1106} __packed;
1107
1108struct wl1271_acx_ba_receiver_setup {
1109 struct acx_header header;
1110
1111 /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
1112 u8 link_id;
1113
1114 u8 tid;
1115
1116 u8 enable;
1117
1118 u8 padding[1];
1119
1120 /* Window size in number of packets */
1121 u16 win_size;
1122
1123 /* BA session starting sequence number. RANGE 0-FFF */
1124 u16 ssn;
1125} __packed;
1126
1054 1127 struct wl1271_acx_fw_tsf_information {
1055 1128 	struct acx_header header;
1056 1129
@@ -1062,6 +1135,26 @@ struct wl1271_acx_fw_tsf_information {
1062 1135 	u8 padding[3];
1063 1136 } __packed;
1064 1137
1138struct wl1271_acx_max_tx_retry {
1139 struct acx_header header;
1140
1141 /*
1142 * The number of frame transmission failures before
1143 * issuing the aging event.
1144 */
1145 __le16 max_tx_retry;
1146 u8 padding_1[2];
1147} __packed;
1148
1149struct wl1271_acx_config_ps {
1150 struct acx_header header;
1151
1152 u8 exit_retries;
1153 u8 enter_retries;
1154 u8 padding[2];
1155 __le32 null_data_rate;
1156} __packed;
1157
1065 1158 enum {
1066 1159 	ACX_WAKE_UP_CONDITIONS = 0x0002,
1067 1160 	ACX_MEM_CFG = 0x0003,
@@ -1113,22 +1206,23 @@ enum {
1113 1206 	ACX_RSSI_SNR_WEIGHTS = 0x0052,
1114 1207 	ACX_KEEP_ALIVE_MODE = 0x0053,
1115 1208 	ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
1116 	ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
1209 	ACX_BA_SESSION_POLICY_CFG = 0x0055,
1117 	ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
1210 	ACX_BA_SESSION_RX_SETUP = 0x0056,
1118 1211 	ACX_PEER_HT_CAP = 0x0057,
1119 1212 	ACX_HT_BSS_OPERATION = 0x0058,
1120 1213 	ACX_COEX_ACTIVITY = 0x0059,
1121 1214 	ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
1215 ACX_GEN_FW_CMD = 0x0070,
1216 ACX_HOST_IF_CFG_BITMAP = 0x0071,
1217 ACX_MAX_TX_FAILURE = 0x0072,
1122 1218 	DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1123 1219 	DOT11_CUR_TX_PWR = 0x100D,
1124 1220 	DOT11_RX_DOT11_MODE = 0x1012,
1125 1221 	DOT11_RTS_THRESHOLD = 0x1013,
1126 1222 	DOT11_GROUP_ADDRESS_TBL = 0x1014,
1127 1223 	ACX_PM_CONFIG = 0x1016,
1128
1224 	ACX_CONFIG_PS = 0x1017,
1129 	MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
1225 	ACX_CONFIG_HANGOVER = 0x1018,
1130
1131 	MAX_IE = 0xFFFF
1132 1226 };
1133 1227
1134 1228
@@ -1160,7 +1254,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1160int wl1271_acx_cts_protect(struct wl1271 *wl, 1254int wl1271_acx_cts_protect(struct wl1271 *wl,
1161 enum acx_ctsprotect_type ctsprotect); 1255 enum acx_ctsprotect_type ctsprotect);
1162int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1256int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1163int wl1271_acx_rate_policies(struct wl1271 *wl); 1257int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
1258int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
1259 u8 idx);
1164int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, 1260int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
1165 u8 aifsn, u16 txop); 1261 u8 aifsn, u16 txop);
1166int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, 1262int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
@@ -1168,7 +1264,8 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
1168 u32 apsd_conf0, u32 apsd_conf1); 1264 u32 apsd_conf0, u32 apsd_conf1);
1169int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold); 1265int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
1170int wl1271_acx_tx_config_options(struct wl1271 *wl); 1266int wl1271_acx_tx_config_options(struct wl1271 *wl);
1171int wl1271_acx_mem_cfg(struct wl1271 *wl); 1267int wl1271_acx_ap_mem_cfg(struct wl1271 *wl);
1268int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
1172int wl1271_acx_init_mem_config(struct wl1271 *wl); 1269int wl1271_acx_init_mem_config(struct wl1271 *wl);
1173int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1270int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1174int wl1271_acx_smart_reflex(struct wl1271 *wl); 1271int wl1271_acx_smart_reflex(struct wl1271 *wl);
@@ -1185,6 +1282,13 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1185 1282 				   bool allow_ht_operation);
1186 1283 int wl1271_acx_set_ht_information(struct wl1271 *wl,
1187 1284 				  u16 ht_operation_mode);
1285int wl1271_acx_set_ba_session(struct wl1271 *wl,
1286 enum ieee80211_back_parties direction,
1287 u8 tid_index, u8 policy);
1288int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
1289 bool enable);
1188 1290 int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1291int wl1271_acx_max_tx_retry(struct wl1271 *wl);
1292int wl1271_acx_config_ps(struct wl1271 *wl);
1189 1293
1190 1294 #endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 4df04f84d7f..1ffbad67d2d 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -28,6 +28,7 @@
28 28 #include "boot.h"
29 29 #include "io.h"
30 30 #include "event.h"
31#include "rx.h"
31 32
32 33 static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
33 34 	[PART_DOWN] = {
@@ -100,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
100 101 	wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
101 102 }
102 103
104static void wl1271_parse_fw_ver(struct wl1271 *wl)
105{
106 int ret;
107
108 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
109 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
110 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
111 &wl->chip.fw_ver[4]);
112
113 if (ret != 5) {
114 wl1271_warning("fw version incorrect value");
115 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
116 return;
117 }
118}
119
103 120 static void wl1271_boot_fw_version(struct wl1271 *wl)
104 121 {
105 122 	struct wl1271_static_data static_data;
@@ -107,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
107 124 	wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
108 125 		    false);
109 126
110 	strncpy(wl->chip.fw_ver, static_data.fw_version,
127 	strncpy(wl->chip.fw_ver_str, static_data.fw_version,
111 		sizeof(wl->chip.fw_ver));
128 		sizeof(wl->chip.fw_ver_str));
112 129
113 130 	/* make sure the string is NULL-terminated */
114 	wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
131 	wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
132
133 	wl1271_parse_fw_ver(wl);
115 134 }
116 135
117static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, 136static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -231,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
231 */ 250 */
232 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) || 251 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
233 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) { 252 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
234 if (wl->nvs->general_params.dual_mode_select) 253 /* for now 11a is unsupported in AP mode */
254 if (wl->bss_type != BSS_TYPE_AP_BSS &&
255 wl->nvs->general_params.dual_mode_select)
235 wl->enable_11a = true; 256 wl->enable_11a = true;
236 } 257 }
237 258
@@ -431,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
431 PSPOLL_DELIVERY_FAILURE_EVENT_ID | 452 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
432 SOFT_GEMINI_SENSE_EVENT_ID; 453 SOFT_GEMINI_SENSE_EVENT_ID;
433 454
455 if (wl->bss_type == BSS_TYPE_AP_BSS)
456 wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
457
434 ret = wl1271_event_unmask(wl); 458 ret = wl1271_event_unmask(wl);
435 if (ret < 0) { 459 if (ret < 0) {
436 wl1271_error("EVENT mask setting failed"); 460 wl1271_error("EVENT mask setting failed");
@@ -595,8 +619,7 @@ int wl1271_boot(struct wl1271 *wl)
595 619 	wl1271_boot_enable_interrupts(wl);
596 620
597 621 	/* set the wl1271 default filters */
598 	wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
622 	wl1271_set_default_filters(wl);
599 	wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
600 623
601 624 	wl1271_event_mbox_config(wl);
602 625
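The new wl1271_parse_fw_ver() above scans five dot-separated numbers starting four characters into the firmware banner, which assumes a fixed-length prefix (presumably "Rev "). A standalone sketch of the same parse, with a made-up version string, so the "+ 4" offset is easy to follow:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical firmware banner; the "Rev " prefix explains the +4 offset */
	const char fw_ver_str[] = "Rev 6.1.0.50.350";
	unsigned int ver[5];

	if (sscanf(fw_ver_str + 4, "%u.%u.%u.%u.%u",
		   &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]) != 5) {
		/* same fallback as the driver: treat the version as unknown */
		fprintf(stderr, "fw version string has unexpected format\n");
		memset(ver, 0, sizeof(ver));
		return 1;
	}

	printf("parsed fw version: %u.%u.%u.%u.%u\n",
	       ver[0], ver[1], ver[2], ver[3], ver[4]);
	return 0;
}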
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 0106628aa5a..97ffd7aa57a 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -36,6 +36,7 @@
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "cmd.h" 37#include "cmd.h"
38#include "event.h" 38#include "event.h"
39#include "tx.h"
39 40
40#define WL1271_CMD_FAST_POLL_COUNT 50 41#define WL1271_CMD_FAST_POLL_COUNT 50
41 42
@@ -221,7 +222,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
221 * Poll the mailbox event field until any of the bits in the mask is set or a 222 * Poll the mailbox event field until any of the bits in the mask is set or a
222 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) 223 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
223 */ 224 */
224static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) 225static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
225{ 226{
226 u32 events_vector, event; 227 u32 events_vector, event;
227 unsigned long timeout; 228 unsigned long timeout;
@@ -230,7 +231,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
230 231
231 do { 232 do {
232 if (time_after(jiffies, timeout)) { 233 if (time_after(jiffies, timeout)) {
233 ieee80211_queue_work(wl->hw, &wl->recovery_work); 234 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
235 (int)mask);
234 return -ETIMEDOUT; 236 return -ETIMEDOUT;
235 } 237 }
236 238
@@ -248,6 +250,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
248 return 0; 250 return 0;
249} 251}
250 252
253static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
254{
255 int ret;
256
257 ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
258 if (ret != 0) {
259 ieee80211_queue_work(wl->hw, &wl->recovery_work);
260 return ret;
261 }
262
263 return 0;
264}
265
251int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) 266int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
252{ 267{
253 struct wl1271_cmd_join *join; 268 struct wl1271_cmd_join *join;
@@ -271,6 +286,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
271 join->rx_filter_options = cpu_to_le32(wl->rx_filter); 286 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
272 join->bss_type = bss_type; 287 join->bss_type = bss_type;
273 join->basic_rate_set = cpu_to_le32(wl->basic_rate_set); 288 join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
289 join->supported_rate_set = cpu_to_le32(wl->rate_set);
274 290
275 if (wl->band == IEEE80211_BAND_5GHZ) 291 if (wl->band == IEEE80211_BAND_5GHZ)
276 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; 292 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
@@ -288,6 +304,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
288 wl->tx_security_last_seq = 0; 304 wl->tx_security_last_seq = 0;
289 wl->tx_security_seq = 0; 305 wl->tx_security_seq = 0;
290 306
307 wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
308 join->basic_rate_set, join->supported_rate_set);
309
291 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0); 310 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
292 if (ret < 0) { 311 if (ret < 0) {
293 wl1271_error("failed to initiate cmd join"); 312 wl1271_error("failed to initiate cmd join");
@@ -439,7 +458,7 @@ out:
439 return ret; 458 return ret;
440} 459}
441 460
442int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send) 461int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
443{ 462{
444 struct wl1271_cmd_ps_params *ps_params = NULL; 463 struct wl1271_cmd_ps_params *ps_params = NULL;
445 int ret = 0; 464 int ret = 0;
@@ -453,10 +472,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
453 } 472 }
454 473
455 ps_params->ps_mode = ps_mode; 474 ps_params->ps_mode = ps_mode;
456 ps_params->send_null_data = send;
457 ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
458 ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
459 ps_params->null_data_rate = cpu_to_le32(rates);
460 475
461 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 476 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
462 sizeof(*ps_params), 0); 477 sizeof(*ps_params), 0);
@@ -490,8 +505,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
490 cmd->len = cpu_to_le16(buf_len); 505 cmd->len = cpu_to_le16(buf_len);
491 cmd->template_type = template_id; 506 cmd->template_type = template_id;
492 cmd->enabled_rates = cpu_to_le32(rates); 507 cmd->enabled_rates = cpu_to_le32(rates);
493 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit; 508 cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
494 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit; 509 cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
495 cmd->index = index; 510 cmd->index = index;
496 511
497 if (buf) 512 if (buf)
@@ -659,15 +674,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
659 674
660 /* llc layer */ 675 /* llc layer */
661 memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 676 memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
662 tmpl.llc_type = htons(ETH_P_ARP); 677 tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
663 678
664 /* arp header */ 679 /* arp header */
665 arp_hdr = &tmpl.arp_hdr; 680 arp_hdr = &tmpl.arp_hdr;
666 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 681 arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
667 arp_hdr->ar_pro = htons(ETH_P_IP); 682 arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
668 arp_hdr->ar_hln = ETH_ALEN; 683 arp_hdr->ar_hln = ETH_ALEN;
669 arp_hdr->ar_pln = 4; 684 arp_hdr->ar_pln = 4;
670 arp_hdr->ar_op = htons(ARPOP_REPLY); 685 arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
671 686
672 /* arp payload */ 687 /* arp payload */
673 memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN); 688 memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
@@ -702,9 +717,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
702 wl->basic_rate); 717 wl->basic_rate);
703} 718}
704 719
705int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 720int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
706{ 721{
707 struct wl1271_cmd_set_keys *cmd; 722 struct wl1271_cmd_set_sta_keys *cmd;
708 int ret = 0; 723 int ret = 0;
709 724
710 wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id); 725 wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -731,11 +746,42 @@ out:
731 return ret; 746 return ret;
732} 747}
733 748
734int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 749int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
750{
751 struct wl1271_cmd_set_ap_keys *cmd;
752 int ret = 0;
753
754 wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
755
756 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
757 if (!cmd) {
758 ret = -ENOMEM;
759 goto out;
760 }
761
762 cmd->hlid = WL1271_AP_BROADCAST_HLID;
763 cmd->key_id = id;
764 cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
765 cmd->key_action = cpu_to_le16(KEY_SET_ID);
766 cmd->key_type = KEY_WEP;
767
768 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
769 if (ret < 0) {
770 wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
771 goto out;
772 }
773
774out:
775 kfree(cmd);
776
777 return ret;
778}
779
780int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
735 u8 key_size, const u8 *key, const u8 *addr, 781 u8 key_size, const u8 *key, const u8 *addr,
736 u32 tx_seq_32, u16 tx_seq_16) 782 u32 tx_seq_32, u16 tx_seq_16)
737{ 783{
738 struct wl1271_cmd_set_keys *cmd; 784 struct wl1271_cmd_set_sta_keys *cmd;
739 int ret = 0; 785 int ret = 0;
740 786
741 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 787 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -788,6 +834,67 @@ out:
788 return ret; 834 return ret;
789} 835}
790 836
837int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
838 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
839 u16 tx_seq_16)
840{
841 struct wl1271_cmd_set_ap_keys *cmd;
842 int ret = 0;
843 u8 lid_type;
844
845 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
846 if (!cmd)
847 return -ENOMEM;
848
849 if (hlid == WL1271_AP_BROADCAST_HLID) {
850 if (key_type == KEY_WEP)
851 lid_type = WEP_DEFAULT_LID_TYPE;
852 else
853 lid_type = BROADCAST_LID_TYPE;
854 } else {
855 lid_type = UNICAST_LID_TYPE;
856 }
857
858 wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
859 " hlid: %d", (int)action, (int)id, (int)lid_type,
860 (int)key_type, (int)hlid);
861
862 cmd->lid_key_type = lid_type;
863 cmd->hlid = hlid;
864 cmd->key_action = cpu_to_le16(action);
865 cmd->key_size = key_size;
866 cmd->key_type = key_type;
867 cmd->key_id = id;
868 cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
869 cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
870
871 if (key_type == KEY_TKIP) {
872 /*
873 * We get the key in the following form:
874 * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
875 * but the target is expecting:
876 * TKIP - RX MIC - TX MIC
877 */
878 memcpy(cmd->key, key, 16);
879 memcpy(cmd->key + 16, key + 24, 8);
880 memcpy(cmd->key + 24, key + 16, 8);
881 } else {
882 memcpy(cmd->key, key, key_size);
883 }
884
885 wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
886
887 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
888 if (ret < 0) {
889 wl1271_warning("could not set ap keys");
890 goto out;
891 }
892
893out:
894 kfree(cmd);
895 return ret;
896}
897
791int wl1271_cmd_disconnect(struct wl1271 *wl) 898int wl1271_cmd_disconnect(struct wl1271 *wl)
792{ 899{
793 struct wl1271_cmd_disconnect *cmd; 900 struct wl1271_cmd_disconnect *cmd;
@@ -850,3 +957,180 @@ out_free:
850out: 957out:
851 return ret; 958 return ret;
852} 959}
960
961int wl1271_cmd_start_bss(struct wl1271 *wl)
962{
963 struct wl1271_cmd_bss_start *cmd;
964 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
965 int ret;
966
967 wl1271_debug(DEBUG_CMD, "cmd start bss");
968
969 /*
970 * FIXME: We currently do not support hidden SSID. The real SSID
971 * should be fetched from mac80211 first.
972 */
973 if (wl->ssid_len == 0) {
974 wl1271_warning("Hidden SSID currently not supported for AP");
975 ret = -EINVAL;
976 goto out;
977 }
978
979 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
980 if (!cmd) {
981 ret = -ENOMEM;
982 goto out;
983 }
984
985 memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
986
987 cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
988 cmd->bss_index = WL1271_AP_BSS_INDEX;
989 cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
990 cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
991 cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
992 cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
993 cmd->dtim_interval = bss_conf->dtim_period;
994 cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
995 cmd->channel = wl->channel;
996 cmd->ssid_len = wl->ssid_len;
997 cmd->ssid_type = SSID_TYPE_PUBLIC;
998 memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
999
1000 switch (wl->band) {
1001 case IEEE80211_BAND_2GHZ:
1002 cmd->band = RADIO_BAND_2_4GHZ;
1003 break;
1004 case IEEE80211_BAND_5GHZ:
1005 cmd->band = RADIO_BAND_5GHZ;
1006 break;
1007 default:
1008 wl1271_warning("bss start - unknown band: %d", (int)wl->band);
1009 cmd->band = RADIO_BAND_2_4GHZ;
1010 break;
1011 }
1012
1013 ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
1014 if (ret < 0) {
1015 wl1271_error("failed to initiate cmd start bss");
1016 goto out_free;
1017 }
1018
1019out_free:
1020 kfree(cmd);
1021
1022out:
1023 return ret;
1024}
1025
1026int wl1271_cmd_stop_bss(struct wl1271 *wl)
1027{
1028 struct wl1271_cmd_bss_start *cmd;
1029 int ret;
1030
1031 wl1271_debug(DEBUG_CMD, "cmd stop bss");
1032
1033 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1034 if (!cmd) {
1035 ret = -ENOMEM;
1036 goto out;
1037 }
1038
1039 cmd->bss_index = WL1271_AP_BSS_INDEX;
1040
1041 ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
1042 if (ret < 0) {
1043 wl1271_error("failed to initiate cmd stop bss");
1044 goto out_free;
1045 }
1046
1047out_free:
1048 kfree(cmd);
1049
1050out:
1051 return ret;
1052}
1053
1054int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
1055{
1056 struct wl1271_cmd_add_sta *cmd;
1057 int ret;
1058
1059 wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
1060
1061 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1062 if (!cmd) {
1063 ret = -ENOMEM;
1064 goto out;
1065 }
1066
1067 /* currently we don't support UAPSD */
1068 cmd->sp_len = 0;
1069
1070 memcpy(cmd->addr, sta->addr, ETH_ALEN);
1071 cmd->bss_index = WL1271_AP_BSS_INDEX;
1072 cmd->aid = sta->aid;
1073 cmd->hlid = hlid;
1074
1075 /*
1076 * FIXME: Does STA support QOS? We need to propagate this info from
1077 * hostapd. Currently not that important since this is only used for
1078 * sending the correct flavor of null-data packet in response to a
1079 * trigger.
1080 */
1081 cmd->wmm = 0;
1082
1083 cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
1084 sta->supp_rates[wl->band]));
1085
1086 wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
1087
1088 ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
1089 if (ret < 0) {
1090 wl1271_error("failed to initiate cmd add sta");
1091 goto out_free;
1092 }
1093
1094out_free:
1095 kfree(cmd);
1096
1097out:
1098 return ret;
1099}
1100
1101int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
1102{
1103 struct wl1271_cmd_remove_sta *cmd;
1104 int ret;
1105
1106 wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
1107
1108 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1109 if (!cmd) {
1110 ret = -ENOMEM;
1111 goto out;
1112 }
1113
1114 cmd->hlid = hlid;
1115 /* We never send a deauth, mac80211 is in charge of this */
1116 cmd->reason_opcode = 0;
1117 cmd->send_deauth_flag = 0;
1118
1119 ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
1120 if (ret < 0) {
1121 wl1271_error("failed to initiate cmd remove sta");
1122 goto out_free;
1123 }
1124
1125 /*
1126 * We are ok with a timeout here. The event is sometimes not sent
1127 * due to a firmware bug.
1128 */
1129 wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
1130
1131out_free:
1132 kfree(cmd);
1133
1134out:
1135 return ret;
1136}
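The TKIP branch in wl1271_cmd_set_ap_key() above (like its STA counterpart) reorders the key material because mac80211 hands over TK | TX MIC | RX MIC while the firmware expects TK | RX MIC | TX MIC. A minimal standalone restatement of that shuffle; the helper name and length macros are made up for illustration:

#include <string.h>

#define WL_TKIP_TK_LEN  16	/* temporal key */
#define WL_TKIP_MIC_LEN  8	/* each MIC key */

/*
 * Reorder a 32-byte TKIP key from the mac80211 layout
 * (TK | TX MIC | RX MIC) into the firmware layout (TK | RX MIC | TX MIC),
 * mirroring the three memcpy() calls in the patch.
 */
static void wl_tkip_reorder(unsigned char *fw_key, const unsigned char *key)
{
	memcpy(fw_key, key, WL_TKIP_TK_LEN);
	memcpy(fw_key + 16, key + 24, WL_TKIP_MIC_LEN);	/* RX MIC */
	memcpy(fw_key + 24, key + 16, WL_TKIP_MIC_LEN);	/* TX MIC */
}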
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 2a1d9db7ceb..54c12e71417 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -39,7 +39,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
39int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
40int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 40int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
41int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); 41int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
42int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send); 42int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
43int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 43int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
44 size_t len); 44 size_t len);
45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -54,12 +54,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
54int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr); 54int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
55int wl1271_build_qos_null_data(struct wl1271 *wl); 55int wl1271_build_qos_null_data(struct wl1271 *wl);
56int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); 56int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
57int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 57int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
58int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 58int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
59 u8 key_size, const u8 *key, const u8 *addr, 59int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
60 u32 tx_seq_32, u16 tx_seq_16); 60 u8 key_size, const u8 *key, const u8 *addr,
61 u32 tx_seq_32, u16 tx_seq_16);
62int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
63 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
64 u16 tx_seq_16);
61int wl1271_cmd_disconnect(struct wl1271 *wl); 65int wl1271_cmd_disconnect(struct wl1271 *wl);
62int wl1271_cmd_set_sta_state(struct wl1271 *wl); 66int wl1271_cmd_set_sta_state(struct wl1271 *wl);
67int wl1271_cmd_start_bss(struct wl1271 *wl);
68int wl1271_cmd_stop_bss(struct wl1271 *wl);
69int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
70int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
63 71
64enum wl1271_commands { 72enum wl1271_commands {
65 CMD_INTERROGATE = 1, /*use this to read information elements*/ 73 CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -98,6 +106,12 @@ enum wl1271_commands {
98 CMD_STOP_PERIODIC_SCAN = 51, 106 CMD_STOP_PERIODIC_SCAN = 51,
99 CMD_SET_STA_STATE = 52, 107 CMD_SET_STA_STATE = 52,
100 108
109 /* AP mode commands */
110 CMD_BSS_START = 60,
111 CMD_BSS_STOP = 61,
112 CMD_ADD_STA = 62,
113 CMD_REMOVE_STA = 63,
114
101 NUM_COMMANDS, 115 NUM_COMMANDS,
102 MAX_COMMAND_ID = 0xFFFF, 116 MAX_COMMAND_ID = 0xFFFF,
103}; 117};
@@ -126,6 +140,14 @@ enum cmd_templ {
126 * For CTS-to-self (FastCTS) mechanism 140 * For CTS-to-self (FastCTS) mechanism
127 * for BT/WLAN coexistence (SoftGemini). */ 141 * for BT/WLAN coexistence (SoftGemini). */
128 CMD_TEMPL_ARP_RSP, 142 CMD_TEMPL_ARP_RSP,
143 CMD_TEMPL_LINK_MEASUREMENT_REPORT,
144
145 /* AP-mode specific */
146 CMD_TEMPL_AP_BEACON = 13,
147 CMD_TEMPL_AP_PROBE_RESPONSE,
148 CMD_TEMPL_AP_ARP_RSP,
149 CMD_TEMPL_DEAUTH_AP,
150
129 CMD_TEMPL_MAX = 0xff 151 CMD_TEMPL_MAX = 0xff
130}; 152};
131 153
@@ -195,6 +217,7 @@ struct wl1271_cmd_join {
195 * ACK or CTS frames). 217 * ACK or CTS frames).
196 */ 218 */
197 __le32 basic_rate_set; 219 __le32 basic_rate_set;
220 __le32 supported_rate_set;
198 u8 dtim_interval; 221 u8 dtim_interval;
199 /* 222 /*
200 * bits 0-2: This bitwise field specifies the type 223 * bits 0-2: This bitwise field specifies the type
@@ -257,20 +280,11 @@ struct wl1271_cmd_ps_params {
257 struct wl1271_cmd_header header; 280 struct wl1271_cmd_header header;
258 281
259 u8 ps_mode; /* STATION_* */ 282 u8 ps_mode; /* STATION_* */
260 u8 send_null_data; /* Do we have to send NULL data packet ? */ 283 u8 padding[3];
261 u8 retries; /* Number of retires for the initial NULL data packet */
262
263 /*
264 * TUs during which the target stays awake after switching
265 * to power save mode.
266 */
267 u8 hang_over_period;
268 __le32 null_data_rate;
269} __packed; 284} __packed;
270 285
271/* HW encryption keys */ 286/* HW encryption keys */
272#define NUM_ACCESS_CATEGORIES_COPY 4 287#define NUM_ACCESS_CATEGORIES_COPY 4
273#define MAX_KEY_SIZE 32
274 288
275enum wl1271_cmd_key_action { 289enum wl1271_cmd_key_action {
276 KEY_ADD_OR_REPLACE = 1, 290 KEY_ADD_OR_REPLACE = 1,
@@ -289,7 +303,7 @@ enum wl1271_cmd_key_type {
289 303
290/* FIXME: Add description for key-types */ 304/* FIXME: Add description for key-types */
291 305
292struct wl1271_cmd_set_keys { 306struct wl1271_cmd_set_sta_keys {
293 struct wl1271_cmd_header header; 307 struct wl1271_cmd_header header;
294 308
295 /* Ignored for default WEP key */ 309 /* Ignored for default WEP key */
@@ -318,6 +332,57 @@ struct wl1271_cmd_set_keys {
318 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 332 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
319} __packed; 333} __packed;
320 334
335enum wl1271_cmd_lid_key_type {
336 UNICAST_LID_TYPE = 0,
337 BROADCAST_LID_TYPE = 1,
338 WEP_DEFAULT_LID_TYPE = 2
339};
340
341struct wl1271_cmd_set_ap_keys {
342 struct wl1271_cmd_header header;
343
344 /*
345 * Indicates whether the HLID is a unicast key set
346 * or broadcast key set. A special value 0xFF is
347 * used to indicate that the HLID is on WEP-default
348 * (multi-hlids). Of type wl1271_cmd_lid_key_type.
349 */
350 u8 hlid;
351
352 /*
353 * In WEP-default network (hlid == 0xFF) used to
354 * indicate which network STA/IBSS/AP role should be
355 * changed
356 */
357 u8 lid_key_type;
358
359 /*
360 * Key ID - For TKIP and AES key types, this field
361 * indicates the value that should be inserted into
362 * the KeyID field of frames transmitted using this
363 * key entry. For broadcast keys the index is used as a
364 * marker for the TX/RX key.
365 * For WEP default network (HLID=0xFF), this field
366 * indicates the ID of the key to add or remove.
367 */
368 u8 key_id;
369 u8 reserved_1;
370
371 /* key_action_e */
372 __le16 key_action;
373
374 /* key size in bytes */
375 u8 key_size;
376
377 /* key_type_e */
378 u8 key_type;
379
380 /* This field holds the security key data to add to the STA table */
381 u8 key[MAX_KEY_SIZE];
382 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
383 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
384} __packed;
385
321struct wl1271_cmd_test_header { 386struct wl1271_cmd_test_header {
322 u8 id; 387 u8 id;
323 u8 padding[3]; 388 u8 padding[3];
@@ -412,4 +477,68 @@ struct wl1271_cmd_set_sta_state {
412 u8 padding[3]; 477 u8 padding[3];
413} __packed; 478} __packed;
414 479
480enum wl1271_ssid_type {
481 SSID_TYPE_PUBLIC = 0,
482 SSID_TYPE_HIDDEN = 1
483};
484
485struct wl1271_cmd_bss_start {
486 struct wl1271_cmd_header header;
487
488 /* wl1271_ssid_type */
489 u8 ssid_type;
490 u8 ssid_len;
491 u8 ssid[IW_ESSID_MAX_SIZE];
492 u8 padding_1[2];
493
494 /* Basic rate set */
495 __le32 basic_rate_set;
496 /* Aging period in seconds*/
497 __le16 aging_period;
498
499 /*
500 * This field specifies the time between target beacon
501 * transmission times (TBTTs), in time units (TUs).
502 * Valid values are 1 to 1024.
503 */
504 __le16 beacon_interval;
505 u8 bssid[ETH_ALEN];
506 u8 bss_index;
507 /* Radio band */
508 u8 band;
509 u8 channel;
510 /* The host link id for the AP's global queue */
511 u8 global_hlid;
512 /* The host link id for the AP's broadcast queue */
513 u8 broadcast_hlid;
514 /* DTIM count */
515 u8 dtim_interval;
516 /* Beacon expiry time in ms */
517 u8 beacon_expiry;
518 u8 padding_2[3];
519} __packed;
520
521struct wl1271_cmd_add_sta {
522 struct wl1271_cmd_header header;
523
524 u8 addr[ETH_ALEN];
525 u8 hlid;
526 u8 aid;
527 u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
528 __le32 supported_rates;
529 u8 bss_index;
530 u8 sp_len;
531 u8 wmm;
532 u8 padding1;
533} __packed;
534
535struct wl1271_cmd_remove_sta {
536 struct wl1271_cmd_header header;
537
538 u8 hlid;
539 u8 reason_opcode;
540 u8 send_deauth_flag;
541 u8 padding1;
542} __packed;
543
415 544 #endif /* __WL1271_CMD_H__ */
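The lid_key_type carried in wl1271_cmd_set_ap_keys is derived in cmd.c from the target HLID and key type; restated here as a small hypothetical helper, assuming the driver's own definitions of WL1271_AP_BROADCAST_HLID and the key/lid enums:

/*
 * Restates the selection logic from wl1271_cmd_set_ap_key(): a broadcast
 * HLID carrying WEP uses the WEP-default entry, other broadcast keys use
 * the broadcast entry, and everything else is a per-station unicast key.
 */
static u8 wl1271_ap_lid_type(u8 hlid, u8 key_type)
{
	if (hlid == WL1271_AP_BROADCAST_HLID)
		return (key_type == KEY_WEP) ? WEP_DEFAULT_LID_TYPE :
					       BROADCAST_LID_TYPE;
	return UNICAST_LID_TYPE;
}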
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index a16b3616e43..856a8a2fff4 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -496,6 +496,26 @@ struct conf_rx_settings {
496 CONF_HW_BIT_RATE_2MBPS) 496 CONF_HW_BIT_RATE_2MBPS)
497#define CONF_TX_RATE_RETRY_LIMIT 10 497#define CONF_TX_RATE_RETRY_LIMIT 10
498 498
499/*
500 * Rates supported for data packets when operating as AP. Note the absence
501 * of the 22Mbps rate: the FW is limited to 12 rates, so one must be dropped,
502 * and 22Mbps is not mandatory under any operating mode.
503 */
504#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
505 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
506 CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
507 CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
508 CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \
509 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
510 CONF_HW_BIT_RATE_54MBPS)
511
512/*
513 * Default rates for management traffic when operating in AP mode. This
514 * should be configured according to the basic rate set of the AP
515 */
516#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
517 CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
518
499struct conf_tx_rate_class { 519struct conf_tx_rate_class {
500 520
501 /* 521 /*
@@ -636,9 +656,9 @@ struct conf_tx_settings {
636 656
637 /* 657 /*
638 * Configuration for rate classes for TX (currently only one 658 * Configuration for rate classes for TX (currently only one
639 * rate class supported.) 659 * rate class supported). Used in non-AP mode.
640 */ 660 */
641 struct conf_tx_rate_class rc_conf; 661 struct conf_tx_rate_class sta_rc_conf;
642 662
643 /* 663 /*
644 * Configuration for access categories for TX rate control. 664 * Configuration for access categories for TX rate control.
@@ -647,6 +667,28 @@ struct conf_tx_settings {
647 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT]; 667 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
648 668
649 /* 669 /*
670 * Configuration for rate classes in AP-mode. These rate classes
671 * are for the AC TX queues
672 */
673 struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
674
675 /*
676 * Management TX rate class for AP-mode.
677 */
678 struct conf_tx_rate_class ap_mgmt_conf;
679
680 /*
681 * Broadcast TX rate class for AP-mode.
682 */
683 struct conf_tx_rate_class ap_bcst_conf;
684
685 /*
686 * AP-mode - allow this number of TX retries to a station before an
687 * event is triggered from FW.
688 */
689 u16 ap_max_tx_retries;
690
691 /*
650 * Configuration for TID parameters. 692 * Configuration for TID parameters.
651 */ 693 */
652 u8 tid_conf_count; 694 u8 tid_conf_count;
@@ -687,6 +729,12 @@ struct conf_tx_settings {
687 * Range: CONF_HW_BIT_RATE_* bit mask 729 * Range: CONF_HW_BIT_RATE_* bit mask
688 */ 730 */
689 u32 basic_rate_5; 731 u32 basic_rate_5;
732
733 /*
734 * TX retry limits for templates
735 */
736 u8 tmpl_short_retry_limit;
737 u8 tmpl_long_retry_limit;
690}; 738};
691 739
692enum { 740enum {
@@ -912,6 +960,14 @@ struct conf_conn_settings {
912 u8 psm_entry_retries; 960 u8 psm_entry_retries;
913 961
914 /* 962 /*
963 * Specifies the maximum number of times to try PSM exit if it fails
964 * (if sending the appropriate null-func message fails.)
965 *
966 * Range 0 - 255
967 */
968 u8 psm_exit_retries;
969
970 /*
915 * Specifies the maximum number of times to try transmit the PSM entry 971 * Specifies the maximum number of times to try transmit the PSM entry
916 * null-func frame for each PSM entry attempt 972 * null-func frame for each PSM entry attempt
917 * 973 *
@@ -1036,30 +1092,30 @@ struct conf_scan_settings {
1036 /* 1092 /*
1037 * The minimum time to wait on each channel for active scans 1093 * The minimum time to wait on each channel for active scans
1038 * 1094 *
1039 * Range: 0 - 65536 tu 1095 * Range: u32 tu/1000
1040 */ 1096 */
1041 u16 min_dwell_time_active; 1097 u32 min_dwell_time_active;
1042 1098
1043 /* 1099 /*
1044 * The maximum time to wait on each channel for active scans 1100 * The maximum time to wait on each channel for active scans
1045 * 1101 *
1046 * Range: 0 - 65536 tu 1102 * Range: u32 tu/1000
1047 */ 1103 */
1048 u16 max_dwell_time_active; 1104 u32 max_dwell_time_active;
1049 1105
1050 /* 1106 /*
1051 * The maximum time to wait on each channel for passive scans 1107 * The minimum time to wait on each channel for passive scans
1052 * 1108 *
1053 * Range: 0 - 65536 tu 1109 * Range: u32 tu/1000
1054 */ 1110 */
1055 u16 min_dwell_time_passive; 1111 u32 min_dwell_time_passive;
1056 1112
1057 /* 1113 /*
1058 * The maximum time to wait on each channel for passive scans 1114 * The maximum time to wait on each channel for passive scans
1059 * 1115 *
1060 * Range: 0 - 65536 tu 1116 * Range: u32 tu/1000
1061 */ 1117 */
1062 u16 max_dwell_time_passive; 1118 u32 max_dwell_time_passive;
1063 1119
1064 /* 1120 /*
1065 * Number of probe requests to transmit on each active scan channel 1121 * Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1146,51 @@ struct conf_rf_settings {
1090 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5]; 1146 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
1091}; 1147};
1092 1148
1149struct conf_ht_setting {
1150 u16 tx_ba_win_size;
1151 u16 inactivity_timeout;
1152};
1153
1154struct conf_memory_settings {
1155 /* Number of stations supported in IBSS mode */
1156 u8 num_stations;
1157
1158 /* Number of ssid profiles used in IBSS mode */
1159 u8 ssid_profiles;
1160
1161 /* Number of memory buffers allocated to rx pool */
1162 u8 rx_block_num;
1163
1164 /* Minimum number of blocks allocated to tx pool */
1165 u8 tx_min_block_num;
1166
1167 /* Disable/Enable dynamic memory */
1168 u8 dynamic_memory;
1169
1170 /*
1171 * Minimum number of free tx memory blocks required to assure optimum
1172 * performance
1173 *
1174 * Range: 0-120
1175 */
1176 u8 min_req_tx_blocks;
1177
1178 /*
1179 * Minimum number of free rx memory blocks required to assure optimum
1180 * performance
1181 *
1182 * Range: 0-120
1183 */
1184 u8 min_req_rx_blocks;
1185
1186 /*
1187 * Minimum number of mem blocks (free+used) guaranteed for TX
1188 *
1189 * Range: 0-120
1190 */
1191 u8 tx_min;
1192};
1193
1093 1194 struct conf_drv_settings {
1094 1195 	struct conf_sg_settings sg;
1095 1196 	struct conf_rx_settings rx;
@@ -1100,6 +1201,8 @@ struct conf_drv_settings {
1100 1201 	struct conf_roam_trigger_settings roam_trigger;
1101 1202 	struct conf_scan_settings scan;
1102 1203 	struct conf_rf_settings rf;
1204 	struct conf_ht_setting ht;
1205 	struct conf_memory_settings mem;
1103 1206 };
1104 1207
1105 1208 #endif
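An illustrative initializer for the new conf.mem block, to show how the acx.c changes earlier in this patch pick their values up from wl->conf.mem. The first four values simply reuse the constants this patch removes from acx.h (DEFAULT_NUM_STATIONS, ACX_NUM_SSID_PROFILES, ACX_RX_MEM_BLOCKS, ACX_TX_MIN_MEM_BLOCKS); the dynamic-memory numbers are placeholders, not values taken from the patch.

static const struct conf_memory_settings example_mem_conf = {
	.num_stations      = 1,		/* was DEFAULT_NUM_STATIONS */
	.ssid_profiles     = 1,		/* was ACX_NUM_SSID_PROFILES */
	.rx_block_num      = 70,	/* was ACX_RX_MEM_BLOCKS */
	.tx_min_block_num  = 40,	/* was ACX_TX_MIN_MEM_BLOCKS */
	.dynamic_memory    = 0,		/* placeholder */
	.min_req_tx_blocks = 100,	/* placeholder, range 0-120 */
	.min_req_rx_blocks = 22,	/* placeholder, range 0-120 */
	.tx_min            = 27,	/* placeholder, range 0-120 */
};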
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index ec607776015..bebfa28a171 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file,
261 unsigned long value; 261 unsigned long value;
262 int ret; 262 int ret;
263 263
264 mutex_lock(&wl->mutex);
265
266 len = min(count, sizeof(buf) - 1); 264 len = min(count, sizeof(buf) - 1);
267 if (copy_from_user(buf, user_buf, len)) { 265 if (copy_from_user(buf, user_buf, len)) {
268 ret = -EFAULT; 266 return -EFAULT;
269 goto out;
270 } 267 }
271 buf[len] = '\0'; 268 buf[len] = '\0';
272 269
273 ret = strict_strtoul(buf, 0, &value); 270 ret = strict_strtoul(buf, 0, &value);
274 if (ret < 0) { 271 if (ret < 0) {
275 wl1271_warning("illegal value in gpio_power"); 272 wl1271_warning("illegal value in gpio_power");
276 goto out; 273 return -EINVAL;
277 } 274 }
278 275
276 mutex_lock(&wl->mutex);
277
279 if (value) 278 if (value)
280 wl1271_power_on(wl); 279 wl1271_power_on(wl);
281 else 280 else
282 wl1271_power_off(wl); 281 wl1271_power_off(wl);
283 282
284out:
285 mutex_unlock(&wl->mutex); 283 mutex_unlock(&wl->mutex);
286 return count; 284 return count;
287} 285}
@@ -293,12 +291,13 @@ static const struct file_operations gpio_power_ops = {
293 .llseek = default_llseek, 291 .llseek = default_llseek,
294}; 292};
295 293
296static int wl1271_debugfs_add_files(struct wl1271 *wl) 294static int wl1271_debugfs_add_files(struct wl1271 *wl,
295 struct dentry *rootdir)
297{ 296{
298 int ret = 0; 297 int ret = 0;
299 struct dentry *entry, *stats; 298 struct dentry *entry, *stats;
300 299
301 stats = debugfs_create_dir("fw-statistics", wl->rootdir); 300 stats = debugfs_create_dir("fw-statistics", rootdir);
302 if (!stats || IS_ERR(stats)) { 301 if (!stats || IS_ERR(stats)) {
303 entry = stats; 302 entry = stats;
304 goto err; 303 goto err;
@@ -395,16 +394,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
395 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); 394 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
396 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); 395 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
397 396
398 DEBUGFS_ADD(tx_queue_len, wl->rootdir); 397 DEBUGFS_ADD(tx_queue_len, rootdir);
399 DEBUGFS_ADD(retry_count, wl->rootdir); 398 DEBUGFS_ADD(retry_count, rootdir);
400 DEBUGFS_ADD(excessive_retries, wl->rootdir); 399 DEBUGFS_ADD(excessive_retries, rootdir);
401
402 DEBUGFS_ADD(gpio_power, wl->rootdir);
403 400
404 entry = debugfs_create_x32("debug_level", 0600, wl->rootdir, 401 DEBUGFS_ADD(gpio_power, rootdir);
405 &wl12xx_debug_level);
406 if (!entry || IS_ERR(entry))
407 goto err;
408 402
409 return 0; 403 return 0;
410 404
@@ -419,7 +413,7 @@ err:
419 413
420void wl1271_debugfs_reset(struct wl1271 *wl) 414void wl1271_debugfs_reset(struct wl1271 *wl)
421{ 415{
422 if (!wl->rootdir) 416 if (!wl->stats.fw_stats)
423 return; 417 return;
424 418
425 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); 419 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
@@ -430,13 +424,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
430int wl1271_debugfs_init(struct wl1271 *wl) 424int wl1271_debugfs_init(struct wl1271 *wl)
431{ 425{
432 int ret; 426 int ret;
427 struct dentry *rootdir;
433 428
434 wl->rootdir = debugfs_create_dir(KBUILD_MODNAME, 429 rootdir = debugfs_create_dir(KBUILD_MODNAME,
435 wl->hw->wiphy->debugfsdir); 430 wl->hw->wiphy->debugfsdir);
436 431
437 if (IS_ERR(wl->rootdir)) { 432 if (IS_ERR(rootdir)) {
438 ret = PTR_ERR(wl->rootdir); 433 ret = PTR_ERR(rootdir);
439 wl->rootdir = NULL;
440 goto err; 434 goto err;
441 } 435 }
442 436
@@ -450,7 +444,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
450 444
451 wl->stats.fw_stats_update = jiffies; 445 wl->stats.fw_stats_update = jiffies;
452 446
453 ret = wl1271_debugfs_add_files(wl); 447 ret = wl1271_debugfs_add_files(wl, rootdir);
454 448
455 if (ret < 0) 449 if (ret < 0)
456 goto err_file; 450 goto err_file;
@@ -462,8 +456,7 @@ err_file:
462 wl->stats.fw_stats = NULL; 456 wl->stats.fw_stats = NULL;
463 457
464err_fw: 458err_fw:
465 debugfs_remove_recursive(wl->rootdir); 459 debugfs_remove_recursive(rootdir);
466 wl->rootdir = NULL;
467 460
468err: 461err:
469 return ret; 462 return ret;
@@ -473,8 +466,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl)
473{ 466{
474 kfree(wl->stats.fw_stats); 467 kfree(wl->stats.fw_stats);
475 wl->stats.fw_stats = NULL; 468 wl->stats.fw_stats = NULL;
476
477 debugfs_remove_recursive(wl->rootdir);
478 wl->rootdir = NULL;
479
480} 469}
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index f9146f5242f..1b170c5cc59 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -135,20 +135,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
135 /* go to extremely low power mode */ 135 /* go to extremely low power mode */
136 wl1271_ps_elp_sleep(wl); 136 wl1271_ps_elp_sleep(wl);
137 break; 137 break;
138 case EVENT_EXIT_POWER_SAVE_FAIL:
139 wl1271_debug(DEBUG_PSM, "PSM exit failed");
140
141 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
142 wl->psm_entry_retry = 0;
143 break;
144 }
145
146 /* make sure the firmware goes to active mode - the frame to
147 be sent next will indicate to the AP, that we are active. */
148 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
149 wl->basic_rate, false);
150 break;
151 case EVENT_EXIT_POWER_SAVE_SUCCESS:
152 default: 138 default:
153 break; 139 break;
154 } 140 }
@@ -186,6 +172,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
186 int ret; 172 int ret;
187 u32 vector; 173 u32 vector;
188 bool beacon_loss = false; 174 bool beacon_loss = false;
175 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
189 176
190 wl1271_event_mbox_dump(mbox); 177 wl1271_event_mbox_dump(mbox);
191 178
@@ -218,21 +205,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
218 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack. 205 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
219 * 206 *
220 */ 207 */
221 if (vector & BSS_LOSE_EVENT_ID) { 208 if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
222 wl1271_info("Beacon loss detected."); 209 wl1271_info("Beacon loss detected.");
223 210
224 /* indicate to the stack, that beacons have been lost */ 211 /* indicate to the stack, that beacons have been lost */
225 beacon_loss = true; 212 beacon_loss = true;
226 } 213 }
227 214
228 if (vector & PS_REPORT_EVENT_ID) { 215 if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
229 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT"); 216 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
230 ret = wl1271_event_ps_report(wl, mbox, &beacon_loss); 217 ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
231 if (ret < 0) 218 if (ret < 0)
232 return ret; 219 return ret;
233 } 220 }
234 221
235 if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) 222 if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
236 wl1271_event_pspoll_delivery_fail(wl); 223 wl1271_event_pspoll_delivery_fail(wl);
237 224
238 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { 225 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 6cce0143adb..0e80886f303 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -59,6 +59,7 @@ enum {
59 BSS_LOSE_EVENT_ID = BIT(18), 59 BSS_LOSE_EVENT_ID = BIT(18),
60 REGAINED_BSS_EVENT_ID = BIT(19), 60 REGAINED_BSS_EVENT_ID = BIT(19),
61 ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20), 61 ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
62 STA_REMOVE_COMPLETE_EVENT_ID = BIT(21), /* AP */
62 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), 63 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
63 SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23), 64 SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
64 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), 65 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
@@ -74,8 +75,6 @@ enum {
74enum { 75enum {
75 EVENT_ENTER_POWER_SAVE_FAIL = 0, 76 EVENT_ENTER_POWER_SAVE_FAIL = 0,
76 EVENT_ENTER_POWER_SAVE_SUCCESS, 77 EVENT_ENTER_POWER_SAVE_SUCCESS,
77 EVENT_EXIT_POWER_SAVE_FAIL,
78 EVENT_EXIT_POWER_SAVE_SUCCESS,
79}; 78};
80 79
81struct event_debug_report { 80struct event_debug_report {
@@ -115,7 +114,12 @@ struct event_mailbox {
115 u8 scheduled_scan_status; 114 u8 scheduled_scan_status;
116 u8 ps_status; 115 u8 ps_status;
117 116
118 u8 reserved_5[29]; 117 /* AP FW only */
118 u8 hlid_removed;
119 __le16 sta_aging_status;
120 __le16 sta_tx_retry_exceeded;
121
122 u8 reserved_5[24];
119} __packed; 123} __packed;
120 124
121int wl1271_event_unmask(struct wl1271 *wl); 125int wl1271_event_unmask(struct wl1271 *wl);
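
The event.h hunk carves the three new AP-only fields (1 + 2 + 2 = 5 bytes) out of reserved_5, which shrinks from 29 to 24 bytes, so the packed mailbox layout and total size stay identical for the STA firmware. A standalone C11 sketch of that arithmetic (plain userspace code, not the kernel struct itself):

#include <stdint.h>

/* Mirror of the tail of struct event_mailbox after the change; __le16
 * fields are modelled as uint16_t here since only the size matters. */
struct ap_tail {
	uint8_t  hlid_removed;
	uint16_t sta_aging_status;
	uint16_t sta_tx_retry_exceeded;
	uint8_t  reserved_5[24];
} __attribute__((packed));

/* the replaced field was u8 reserved_5[29] */
_Static_assert(sizeof(struct ap_tail) == 29,
	       "AP-only fields must not change the mailbox size");
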
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 785a5304bfc..62dc9839dd3 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -30,27 +30,9 @@
30#include "acx.h" 30#include "acx.h"
31#include "cmd.h" 31#include "cmd.h"
32#include "reg.h" 32#include "reg.h"
33#include "tx.h"
33 34
34static int wl1271_init_hwenc_config(struct wl1271 *wl) 35int wl1271_sta_init_templates_config(struct wl1271 *wl)
35{
36 int ret;
37
38 ret = wl1271_acx_feature_cfg(wl);
39 if (ret < 0) {
40 wl1271_warning("couldn't set feature config");
41 return ret;
42 }
43
44 ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
45 if (ret < 0) {
46 wl1271_warning("couldn't set default key");
47 return ret;
48 }
49
50 return 0;
51}
52
53int wl1271_init_templates_config(struct wl1271 *wl)
54{ 36{
55 int ret, i; 37 int ret, i;
56 38
@@ -118,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
118 return 0; 100 return 0;
119} 101}
120 102
103static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
104{
105 struct wl12xx_disconn_template *tmpl;
106 int ret;
107
108 tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
109 if (!tmpl) {
110 ret = -ENOMEM;
111 goto out;
112 }
113
114 tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
115 IEEE80211_STYPE_DEAUTH);
116
117 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
118 tmpl, sizeof(*tmpl), 0,
119 wl1271_tx_min_rate_get(wl));
120
121out:
122 kfree(tmpl);
123 return ret;
124}
125
126static int wl1271_ap_init_null_template(struct wl1271 *wl)
127{
128 struct ieee80211_hdr_3addr *nullfunc;
129 int ret;
130
131 nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
132 if (!nullfunc) {
133 ret = -ENOMEM;
134 goto out;
135 }
136
137 nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
138 IEEE80211_STYPE_NULLFUNC |
139 IEEE80211_FCTL_FROMDS);
140
141 /* nullfunc->addr1 is filled by FW */
142
143 memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
144 memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
145
146 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
147 sizeof(*nullfunc), 0,
148 wl1271_tx_min_rate_get(wl));
149
150out:
151 kfree(nullfunc);
152 return ret;
153}
154
155static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
156{
157 struct ieee80211_qos_hdr *qosnull;
158 int ret;
159
160 qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
161 if (!qosnull) {
162 ret = -ENOMEM;
163 goto out;
164 }
165
166 qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
167 IEEE80211_STYPE_QOS_NULLFUNC |
168 IEEE80211_FCTL_FROMDS);
169
170 /* qosnull->addr1 is filled by FW */
171
172 memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
173 memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
174
175 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
176 sizeof(*qosnull), 0,
177 wl1271_tx_min_rate_get(wl));
178
179out:
180 kfree(qosnull);
181 return ret;
182}
183
184static int wl1271_ap_init_templates_config(struct wl1271 *wl)
185{
186 int ret;
187
188 /*
189 * Put very large empty placeholders for all templates. These
190 * reserve memory for later.
191 */
192 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
193 sizeof
194 (struct wl12xx_probe_resp_template),
195 0, WL1271_RATE_AUTOMATIC);
196 if (ret < 0)
197 return ret;
198
199 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
200 sizeof
201 (struct wl12xx_beacon_template),
202 0, WL1271_RATE_AUTOMATIC);
203 if (ret < 0)
204 return ret;
205
206 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
207 sizeof
208 (struct wl12xx_disconn_template),
209 0, WL1271_RATE_AUTOMATIC);
210 if (ret < 0)
211 return ret;
212
213 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
214 sizeof(struct wl12xx_null_data_template),
215 0, WL1271_RATE_AUTOMATIC);
216 if (ret < 0)
217 return ret;
218
219 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
220 sizeof
221 (struct wl12xx_qos_null_data_template),
222 0, WL1271_RATE_AUTOMATIC);
223 if (ret < 0)
224 return ret;
225
226 return 0;
227}
228
121static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter) 229static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
122{ 230{
123 int ret; 231 int ret;
@@ -145,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
145 if (ret < 0) 253 if (ret < 0)
146 return ret; 254 return ret;
147 255
148 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
149 if (ret < 0)
150 return ret;
151
152 ret = wl1271_acx_service_period_timeout(wl); 256 ret = wl1271_acx_service_period_timeout(wl);
153 if (ret < 0) 257 if (ret < 0)
154 return ret; 258 return ret;
@@ -213,11 +317,199 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
213 return 0; 317 return 0;
214} 318}
215 319
320static int wl1271_sta_hw_init(struct wl1271 *wl)
321{
322 int ret;
323
324 ret = wl1271_cmd_ext_radio_parms(wl);
325 if (ret < 0)
326 return ret;
327
328 /* PS config */
329 ret = wl1271_acx_config_ps(wl);
330 if (ret < 0)
331 return ret;
332
333 ret = wl1271_sta_init_templates_config(wl);
334 if (ret < 0)
335 return ret;
336
337 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
338 if (ret < 0)
339 return ret;
340
341 /* Initialize connection monitoring thresholds */
342 ret = wl1271_acx_conn_monit_params(wl, false);
343 if (ret < 0)
344 return ret;
345
346 /* Beacon filtering */
347 ret = wl1271_init_beacon_filter(wl);
348 if (ret < 0)
349 return ret;
350
351 /* Bluetooth WLAN coexistence */
352 ret = wl1271_init_pta(wl);
353 if (ret < 0)
354 return ret;
355
356 /* Beacons and broadcast settings */
357 ret = wl1271_init_beacon_broadcast(wl);
358 if (ret < 0)
359 return ret;
360
361 /* Configure for ELP power saving */
362 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
363 if (ret < 0)
364 return ret;
365
366 /* Configure rssi/snr averaging weights */
367 ret = wl1271_acx_rssi_snr_avg_weights(wl);
368 if (ret < 0)
369 return ret;
370
371 ret = wl1271_acx_sta_rate_policies(wl);
372 if (ret < 0)
373 return ret;
374
375 ret = wl1271_acx_sta_mem_cfg(wl);
376 if (ret < 0)
377 return ret;
378
379 return 0;
380}
381
382static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
383{
384 int ret, i;
385
386 ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
387 if (ret < 0) {
388 wl1271_warning("couldn't set default key");
389 return ret;
390 }
391
392 /* disable all keep-alive templates */
393 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
394 ret = wl1271_acx_keep_alive_config(wl, i,
395 ACX_KEEP_ALIVE_TPL_INVALID);
396 if (ret < 0)
397 return ret;
398 }
399
400 /* disable the keep-alive feature */
401 ret = wl1271_acx_keep_alive_mode(wl, false);
402 if (ret < 0)
403 return ret;
404
405 return 0;
406}
407
408static int wl1271_ap_hw_init(struct wl1271 *wl)
409{
410 int ret, i;
411
412 ret = wl1271_ap_init_templates_config(wl);
413 if (ret < 0)
414 return ret;
415
416 /* Configure for power always on */
417 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
418 if (ret < 0)
419 return ret;
420
421 /* Configure initial TX rate classes */
422 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
423 ret = wl1271_acx_ap_rate_policy(wl,
424 &wl->conf.tx.ap_rc_conf[i], i);
425 if (ret < 0)
426 return ret;
427 }
428
429 ret = wl1271_acx_ap_rate_policy(wl,
430 &wl->conf.tx.ap_mgmt_conf,
431 ACX_TX_AP_MODE_MGMT_RATE);
432 if (ret < 0)
433 return ret;
434
435 ret = wl1271_acx_ap_rate_policy(wl,
436 &wl->conf.tx.ap_bcst_conf,
437 ACX_TX_AP_MODE_BCST_RATE);
438 if (ret < 0)
439 return ret;
440
441 ret = wl1271_acx_max_tx_retry(wl);
442 if (ret < 0)
443 return ret;
444
445 ret = wl1271_acx_ap_mem_cfg(wl);
446 if (ret < 0)
447 return ret;
448
449 return 0;
450}
451
452static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
453{
454 int ret;
455
456 ret = wl1271_ap_init_deauth_template(wl);
457 if (ret < 0)
458 return ret;
459
460 ret = wl1271_ap_init_null_template(wl);
461 if (ret < 0)
462 return ret;
463
464 ret = wl1271_ap_init_qos_null_template(wl);
465 if (ret < 0)
466 return ret;
467
468 return 0;
469}
470
471static void wl1271_check_ba_support(struct wl1271 *wl)
472{
473 /* validate FW cost ver x.x.x.50-60.x */
474 if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
475 (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
476 wl->ba_support = true;
477 return;
478 }
479
480 wl->ba_support = false;
481}
482
483static int wl1271_set_ba_policies(struct wl1271 *wl)
484{
485 u8 tid_index;
486 int ret = 0;
487
488 /* Reset the BA RX indicators */
489 wl->ba_rx_bitmap = 0;
490
491 /* validate that the FW supports BA */
492 wl1271_check_ba_support(wl);
493
494 if (wl->ba_support)
495 /* 802.11n initiator BA session setting */
496 for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
497 ++tid_index) {
498 ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
499 tid_index, true);
500 if (ret < 0)
501 break;
502 }
503
504 return ret;
505}
506
216int wl1271_hw_init(struct wl1271 *wl) 507int wl1271_hw_init(struct wl1271 *wl)
217{ 508{
218 struct conf_tx_ac_category *conf_ac; 509 struct conf_tx_ac_category *conf_ac;
219 struct conf_tx_tid *conf_tid; 510 struct conf_tx_tid *conf_tid;
220 int ret, i; 511 int ret, i;
512 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
221 513
222 ret = wl1271_cmd_general_parms(wl); 514 ret = wl1271_cmd_general_parms(wl);
223 if (ret < 0) 515 if (ret < 0)
@@ -227,12 +519,12 @@ int wl1271_hw_init(struct wl1271 *wl)
227 if (ret < 0) 519 if (ret < 0)
228 return ret; 520 return ret;
229 521
230 ret = wl1271_cmd_ext_radio_parms(wl); 522 /* Mode specific init */
231 if (ret < 0) 523 if (is_ap)
232 return ret; 524 ret = wl1271_ap_hw_init(wl);
525 else
526 ret = wl1271_sta_hw_init(wl);
233 527
234 /* Template settings */
235 ret = wl1271_init_templates_config(wl);
236 if (ret < 0) 528 if (ret < 0)
237 return ret; 529 return ret;
238 530
@@ -259,16 +551,6 @@ int wl1271_hw_init(struct wl1271 *wl)
259 if (ret < 0) 551 if (ret < 0)
260 goto out_free_memmap; 552 goto out_free_memmap;
261 553
262 /* Initialize connection monitoring thresholds */
263 ret = wl1271_acx_conn_monit_params(wl, false);
264 if (ret < 0)
265 goto out_free_memmap;
266
267 /* Beacon filtering */
268 ret = wl1271_init_beacon_filter(wl);
269 if (ret < 0)
270 goto out_free_memmap;
271
272 /* Configure TX patch complete interrupt behavior */ 554 /* Configure TX patch complete interrupt behavior */
273 ret = wl1271_acx_tx_config_options(wl); 555 ret = wl1271_acx_tx_config_options(wl);
274 if (ret < 0) 556 if (ret < 0)
@@ -279,21 +561,11 @@ int wl1271_hw_init(struct wl1271 *wl)
279 if (ret < 0) 561 if (ret < 0)
280 goto out_free_memmap; 562 goto out_free_memmap;
281 563
282 /* Bluetooth WLAN coexistence */
283 ret = wl1271_init_pta(wl);
284 if (ret < 0)
285 goto out_free_memmap;
286
287 /* Energy detection */ 564 /* Energy detection */
288 ret = wl1271_init_energy_detection(wl); 565 ret = wl1271_init_energy_detection(wl);
289 if (ret < 0) 566 if (ret < 0)
290 goto out_free_memmap; 567 goto out_free_memmap;
291 568
292 /* Beacons and boradcast settings */
293 ret = wl1271_init_beacon_broadcast(wl);
294 if (ret < 0)
295 goto out_free_memmap;
296
297 /* Default fragmentation threshold */ 569 /* Default fragmentation threshold */
298 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); 570 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
299 if (ret < 0) 571 if (ret < 0)
@@ -321,23 +593,13 @@ int wl1271_hw_init(struct wl1271 *wl)
321 goto out_free_memmap; 593 goto out_free_memmap;
322 } 594 }
323 595
324 /* Configure TX rate classes */
325 ret = wl1271_acx_rate_policies(wl);
326 if (ret < 0)
327 goto out_free_memmap;
328
329 /* Enable data path */ 596 /* Enable data path */
330 ret = wl1271_cmd_data_path(wl, 1); 597 ret = wl1271_cmd_data_path(wl, 1);
331 if (ret < 0) 598 if (ret < 0)
332 goto out_free_memmap; 599 goto out_free_memmap;
333 600
334 /* Configure for ELP power saving */
335 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
336 if (ret < 0)
337 goto out_free_memmap;
338
339 /* Configure HW encryption */ 601 /* Configure HW encryption */
340 ret = wl1271_init_hwenc_config(wl); 602 ret = wl1271_acx_feature_cfg(wl);
341 if (ret < 0) 603 if (ret < 0)
342 goto out_free_memmap; 604 goto out_free_memmap;
343 605
@@ -346,21 +608,17 @@ int wl1271_hw_init(struct wl1271 *wl)
346 if (ret < 0) 608 if (ret < 0)
347 goto out_free_memmap; 609 goto out_free_memmap;
348 610
349 /* disable all keep-alive templates */ 611 /* Mode specific init - post mem init */
350 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 612 if (is_ap)
351 ret = wl1271_acx_keep_alive_config(wl, i, 613 ret = wl1271_ap_hw_init_post_mem(wl);
352 ACX_KEEP_ALIVE_TPL_INVALID); 614 else
353 if (ret < 0) 615 ret = wl1271_sta_hw_init_post_mem(wl);
354 goto out_free_memmap;
355 }
356 616
357 /* disable the keep-alive feature */
358 ret = wl1271_acx_keep_alive_mode(wl, false);
359 if (ret < 0) 617 if (ret < 0)
360 goto out_free_memmap; 618 goto out_free_memmap;
361 619
362 /* Configure rssi/snr averaging weights */ 620 /* Configure initiator BA sessions policies */
363 ret = wl1271_acx_rssi_snr_avg_weights(wl); 621 ret = wl1271_set_ba_policies(wl);
364 if (ret < 0) 622 if (ret < 0)
365 goto out_free_memmap; 623 goto out_free_memmap;
366 624
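
The init.c changes split template handling into two phases for AP mode: wl1271_ap_init_templates_config() reserves maximum-size placeholders before the firmware memory map is configured, and wl1271_ap_hw_init_post_mem() uploads the real deauth/null/QoS-null frames afterwards. A condensed sketch of that reserve-then-fill pattern, using the driver's calls but with hypothetical helper names:

/* Phase 1: pass a NULL buffer so the firmware only reserves max_len bytes
 * for the given template slot. */
static int wl12xx_reserve_template(struct wl1271 *wl, u16 id, size_t max_len)
{
	return wl1271_cmd_template_set(wl, id, NULL, max_len, 0,
				       WL1271_RATE_AUTOMATIC);
}

/* Phase 2 (after the memory map is final): upload the real frame. */
static int wl12xx_fill_deauth_template(struct wl1271 *wl)
{
	struct wl12xx_disconn_template tmpl = {
		.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						IEEE80211_STYPE_DEAUTH),
	};

	return wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, &tmpl,
				       sizeof(tmpl), 0,
				       wl1271_tx_min_rate_get(wl));
}
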
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 7762421f860..3a8bd3f426d 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,7 +27,7 @@
27#include "wl12xx.h" 27#include "wl12xx.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_init_templates_config(struct wl1271 *wl); 30int wl1271_sta_init_templates_config(struct wl1271 *wl);
31int wl1271_init_phy_config(struct wl1271 *wl); 31int wl1271_init_phy_config(struct wl1271 *wl);
32int wl1271_init_pta(struct wl1271 *wl); 32int wl1271_init_pta(struct wl1271 *wl);
33int wl1271_init_energy_detection(struct wl1271 *wl); 33int wl1271_init_energy_detection(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 062247ef3ad..61dea73f5fd 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = {
116 }, 116 },
117 .tx = { 117 .tx = {
118 .tx_energy_detection = 0, 118 .tx_energy_detection = 0,
119 .rc_conf = { 119 .sta_rc_conf = {
120 .enabled_rates = 0, 120 .enabled_rates = 0,
121 .short_retry_limit = 10, 121 .short_retry_limit = 10,
122 .long_retry_limit = 10, 122 .long_retry_limit = 10,
123 .aflags = 0 123 .aflags = 0,
124 }, 124 },
125 .ac_conf_count = 4, 125 .ac_conf_count = 4,
126 .ac_conf = { 126 .ac_conf = {
@@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = {
153 .tx_op_limit = 1504, 153 .tx_op_limit = 1504,
154 }, 154 },
155 }, 155 },
156 .ap_rc_conf = {
157 [0] = {
158 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
159 .short_retry_limit = 10,
160 .long_retry_limit = 10,
161 .aflags = 0,
162 },
163 [1] = {
164 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
165 .short_retry_limit = 10,
166 .long_retry_limit = 10,
167 .aflags = 0,
168 },
169 [2] = {
170 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
171 .short_retry_limit = 10,
172 .long_retry_limit = 10,
173 .aflags = 0,
174 },
175 [3] = {
176 .enabled_rates = CONF_TX_AP_ENABLED_RATES,
177 .short_retry_limit = 10,
178 .long_retry_limit = 10,
179 .aflags = 0,
180 },
181 },
182 .ap_mgmt_conf = {
183 .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES,
184 .short_retry_limit = 10,
185 .long_retry_limit = 10,
186 .aflags = 0,
187 },
188 .ap_bcst_conf = {
189 .enabled_rates = CONF_HW_BIT_RATE_1MBPS,
190 .short_retry_limit = 10,
191 .long_retry_limit = 10,
192 .aflags = 0,
193 },
194 .ap_max_tx_retries = 100,
156 .tid_conf_count = 4, 195 .tid_conf_count = 4,
157 .tid_conf = { 196 .tid_conf = {
158 [CONF_TX_AC_BE] = { 197 [CONF_TX_AC_BE] = {
@@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = {
193 .tx_compl_threshold = 4, 232 .tx_compl_threshold = 4,
194 .basic_rate = CONF_HW_BIT_RATE_1MBPS, 233 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
195 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS, 234 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
235 .tmpl_short_retry_limit = 10,
236 .tmpl_long_retry_limit = 10,
196 }, 237 },
197 .conn = { 238 .conn = {
198 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 239 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -215,6 +256,7 @@ static struct conf_drv_settings default_conf = {
215 .bet_enable = CONF_BET_MODE_ENABLE, 256 .bet_enable = CONF_BET_MODE_ENABLE,
216 .bet_max_consecutive = 10, 257 .bet_max_consecutive = 10,
217 .psm_entry_retries = 5, 258 .psm_entry_retries = 5,
259 .psm_exit_retries = 255,
218 .psm_entry_nullfunc_retries = 3, 260 .psm_entry_nullfunc_retries = 3,
219 .psm_entry_hangover_period = 1, 261 .psm_entry_hangover_period = 1,
220 .keep_alive_interval = 55000, 262 .keep_alive_interval = 55000,
@@ -233,13 +275,13 @@ static struct conf_drv_settings default_conf = {
233 .avg_weight_rssi_beacon = 20, 275 .avg_weight_rssi_beacon = 20,
234 .avg_weight_rssi_data = 10, 276 .avg_weight_rssi_data = 10,
235 .avg_weight_snr_beacon = 20, 277 .avg_weight_snr_beacon = 20,
236 .avg_weight_snr_data = 10 278 .avg_weight_snr_data = 10,
237 }, 279 },
238 .scan = { 280 .scan = {
239 .min_dwell_time_active = 7500, 281 .min_dwell_time_active = 7500,
240 .max_dwell_time_active = 30000, 282 .max_dwell_time_active = 30000,
241 .min_dwell_time_passive = 30000, 283 .min_dwell_time_passive = 100000,
242 .max_dwell_time_passive = 60000, 284 .max_dwell_time_passive = 100000,
243 .num_probe_reqs = 2, 285 .num_probe_reqs = 2,
244 }, 286 },
245 .rf = { 287 .rf = {
@@ -252,9 +294,24 @@ static struct conf_drv_settings default_conf = {
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
253 }, 295 },
254 }, 296 },
297 .ht = {
298 .tx_ba_win_size = 64,
299 .inactivity_timeout = 10000,
300 },
301 .mem = {
302 .num_stations = 1,
303 .ssid_profiles = 1,
304 .rx_block_num = 70,
305 .tx_min_block_num = 40,
306 .dynamic_memory = 0,
307 .min_req_tx_blocks = 104,
308 .min_req_rx_blocks = 22,
309 .tx_min = 27,
310 }
255}; 311};
256 312
257static void __wl1271_op_remove_interface(struct wl1271 *wl); 313static void __wl1271_op_remove_interface(struct wl1271 *wl);
314static void wl1271_free_ap_keys(struct wl1271 *wl);
258 315
259 316
260static void wl1271_device_release(struct device *dev) 317static void wl1271_device_release(struct device *dev)
@@ -393,7 +450,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
393 if (ret < 0) 450 if (ret < 0)
394 return ret; 451 return ret;
395 452
396 ret = wl1271_init_templates_config(wl); 453 ret = wl1271_sta_init_templates_config(wl);
397 if (ret < 0) 454 if (ret < 0)
398 return ret; 455 return ret;
399 456
@@ -477,13 +534,19 @@ static int wl1271_plt_init(struct wl1271 *wl)
477} 534}
478 535
479static void wl1271_fw_status(struct wl1271 *wl, 536static void wl1271_fw_status(struct wl1271 *wl,
480 struct wl1271_fw_status *status) 537 struct wl1271_fw_full_status *full_status)
481{ 538{
539 struct wl1271_fw_common_status *status = &full_status->common;
482 struct timespec ts; 540 struct timespec ts;
483 u32 total = 0; 541 u32 total = 0;
484 int i; 542 int i;
485 543
486 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); 544 if (wl->bss_type == BSS_TYPE_AP_BSS)
545 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
546 sizeof(struct wl1271_fw_ap_status), false);
547 else
548 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
549 sizeof(struct wl1271_fw_sta_status), false);
487 550
488 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 551 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
489 "drv_rx_counter = %d, tx_results_counter = %d)", 552 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -542,7 +605,7 @@ static void wl1271_irq_work(struct work_struct *work)
542 loopcount--; 605 loopcount--;
543 606
544 wl1271_fw_status(wl, wl->fw_status); 607 wl1271_fw_status(wl, wl->fw_status);
545 intr = le32_to_cpu(wl->fw_status->intr); 608 intr = le32_to_cpu(wl->fw_status->common.intr);
546 if (!intr) { 609 if (!intr) {
547 wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 610 wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
548 spin_lock_irqsave(&wl->wl_lock, flags); 611 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -564,7 +627,7 @@ static void wl1271_irq_work(struct work_struct *work)
564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 627 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
565 628
566 /* check for tx results */ 629 /* check for tx results */
567 if (wl->fw_status->tx_results_counter != 630 if (wl->fw_status->common.tx_results_counter !=
568 (wl->tx_results_count & 0xff)) 631 (wl->tx_results_count & 0xff))
569 wl1271_tx_complete(wl); 632 wl1271_tx_complete(wl);
570 633
@@ -578,7 +641,7 @@ static void wl1271_irq_work(struct work_struct *work)
578 wl1271_tx_work_locked(wl); 641 wl1271_tx_work_locked(wl);
579 } 642 }
580 643
581 wl1271_rx(wl, wl->fw_status); 644 wl1271_rx(wl, &wl->fw_status->common);
582 } 645 }
583 646
584 if (intr & WL1271_ACX_INTR_EVENT_A) { 647 if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -616,9 +679,26 @@ out:
616static int wl1271_fetch_firmware(struct wl1271 *wl) 679static int wl1271_fetch_firmware(struct wl1271 *wl)
617{ 680{
618 const struct firmware *fw; 681 const struct firmware *fw;
682 const char *fw_name;
619 int ret; 683 int ret;
620 684
621 ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl)); 685 switch (wl->bss_type) {
686 case BSS_TYPE_AP_BSS:
687 fw_name = WL1271_AP_FW_NAME;
688 break;
689 case BSS_TYPE_IBSS:
690 case BSS_TYPE_STA_BSS:
691 fw_name = WL1271_FW_NAME;
692 break;
693 default:
694 wl1271_error("no compatible firmware for bss_type %d",
695 wl->bss_type);
696 return -EINVAL;
697 }
698
699 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
700
701 ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
622 702
623 if (ret < 0) { 703 if (ret < 0) {
624 wl1271_error("could not get firmware: %d", ret); 704 wl1271_error("could not get firmware: %d", ret);
@@ -632,6 +712,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
632 goto out; 712 goto out;
633 } 713 }
634 714
715 vfree(wl->fw);
635 wl->fw_len = fw->size; 716 wl->fw_len = fw->size;
636 wl->fw = vmalloc(wl->fw_len); 717 wl->fw = vmalloc(wl->fw_len);
637 718
@@ -642,7 +723,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
642 } 723 }
643 724
644 memcpy(wl->fw, fw->data, wl->fw_len); 725 memcpy(wl->fw, fw->data, wl->fw_len);
645 726 wl->fw_bss_type = wl->bss_type;
646 ret = 0; 727 ret = 0;
647 728
648out: 729out:
@@ -778,7 +859,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
778 goto out; 859 goto out;
779 } 860 }
780 861
781 if (wl->fw == NULL) { 862 /* Make sure the firmware type matches the BSS type */
863 if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
782 ret = wl1271_fetch_firmware(wl); 864 ret = wl1271_fetch_firmware(wl);
783 if (ret < 0) 865 if (ret < 0)
784 goto out; 866 goto out;
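
Because AP and managed modes boot different firmware images, the driver now records which image is cached (fw_bss_type) and refetches whenever the interface role changes; the vfree() before vmalloc() keeps that swap from leaking the previously cached image. The two checks, reduced to small helpers with hypothetical names:

static const char *wl12xx_fw_name(const struct wl1271 *wl)
{
	/* BSS_TYPE_IBSS and BSS_TYPE_STA_BSS share the managed-mode image */
	return wl->bss_type == BSS_TYPE_AP_BSS ?
	       WL1271_AP_FW_NAME : WL1271_FW_NAME;
}

static bool wl12xx_need_fw_refetch(const struct wl1271 *wl)
{
	/* nothing cached yet, or the cached image was built for another role */
	return wl->fw == NULL || wl->fw_bss_type != wl->bss_type;
}
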
@@ -811,6 +893,8 @@ int wl1271_plt_start(struct wl1271 *wl)
811 goto out; 893 goto out;
812 } 894 }
813 895
896 wl->bss_type = BSS_TYPE_STA_BSS;
897
814 while (retries) { 898 while (retries) {
815 retries--; 899 retries--;
816 ret = wl1271_chip_wakeup(wl); 900 ret = wl1271_chip_wakeup(wl);
@@ -827,7 +911,7 @@ int wl1271_plt_start(struct wl1271 *wl)
827 911
828 wl->state = WL1271_STATE_PLT; 912 wl->state = WL1271_STATE_PLT;
829 wl1271_notice("firmware booted in PLT mode (%s)", 913 wl1271_notice("firmware booted in PLT mode (%s)",
830 wl->chip.fw_ver); 914 wl->chip.fw_ver_str);
831 goto out; 915 goto out;
832 916
833irq_disable: 917irq_disable:
@@ -854,12 +938,10 @@ out:
854 return ret; 938 return ret;
855} 939}
856 940
857int wl1271_plt_stop(struct wl1271 *wl) 941int __wl1271_plt_stop(struct wl1271 *wl)
858{ 942{
859 int ret = 0; 943 int ret = 0;
860 944
861 mutex_lock(&wl->mutex);
862
863 wl1271_notice("power down"); 945 wl1271_notice("power down");
864 946
865 if (wl->state != WL1271_STATE_PLT) { 947 if (wl->state != WL1271_STATE_PLT) {
@@ -875,50 +957,31 @@ int wl1271_plt_stop(struct wl1271 *wl)
875 wl->state = WL1271_STATE_OFF; 957 wl->state = WL1271_STATE_OFF;
876 wl->rx_counter = 0; 958 wl->rx_counter = 0;
877 959
878out:
879 mutex_unlock(&wl->mutex); 960 mutex_unlock(&wl->mutex);
880
881 cancel_work_sync(&wl->irq_work); 961 cancel_work_sync(&wl->irq_work);
882 cancel_work_sync(&wl->recovery_work); 962 cancel_work_sync(&wl->recovery_work);
963 mutex_lock(&wl->mutex);
964out:
965 return ret;
966}
883 967
968int wl1271_plt_stop(struct wl1271 *wl)
969{
970 int ret;
971
972 mutex_lock(&wl->mutex);
973 ret = __wl1271_plt_stop(wl);
974 mutex_unlock(&wl->mutex);
884 return ret; 975 return ret;
885} 976}
886 977
887static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 978static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
888{ 979{
889 struct wl1271 *wl = hw->priv; 980 struct wl1271 *wl = hw->priv;
890 struct ieee80211_conf *conf = &hw->conf;
891 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
892 struct ieee80211_sta *sta = txinfo->control.sta;
893 unsigned long flags; 981 unsigned long flags;
894 int q; 982 int q;
895 983
896 /*
897 * peek into the rates configured in the STA entry.
898 * The rates set after connection stage, The first block only BG sets:
899 * the compare is for bit 0-16 of sta_rate_set. The second block add
900 * HT rates in case of HT supported.
901 */
902 spin_lock_irqsave(&wl->wl_lock, flags); 984 spin_lock_irqsave(&wl->wl_lock, flags);
903 if (sta &&
904 (sta->supp_rates[conf->channel->band] !=
905 (wl->sta_rate_set & HW_BG_RATES_MASK))) {
906 wl->sta_rate_set = sta->supp_rates[conf->channel->band];
907 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
908 }
909
910#ifdef CONFIG_WL12XX_HT
911 if (sta &&
912 sta->ht_cap.ht_supported &&
913 ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
914 sta->ht_cap.mcs.rx_mask[0])) {
915 /* Clean MCS bits before setting them */
916 wl->sta_rate_set &= HW_BG_RATES_MASK;
917 wl->sta_rate_set |=
918 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
919 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
920 }
921#endif
922 wl->tx_queue_count++; 985 wl->tx_queue_count++;
923 spin_unlock_irqrestore(&wl->wl_lock, flags); 986 spin_unlock_irqrestore(&wl->wl_lock, flags);
924 987
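
__wl1271_plt_stop() above is called with wl->mutex held, but cancel_work_sync() may have to wait for irq_work or recovery_work, which take the same mutex; the function therefore drops the lock around the cancellations and re-takes it before returning to its caller. The shape of that pattern, stripped to a sketch:

/* Sketch only: caller holds wl->mutex on entry and expects it held on exit. */
static void wl12xx_stop_works_locked(struct wl1271 *wl)
{
	/* ...teardown that needs the lock... */

	mutex_unlock(&wl->mutex);
	cancel_work_sync(&wl->irq_work);	/* may flush a handler that takes wl->mutex */
	cancel_work_sync(&wl->recovery_work);
	mutex_lock(&wl->mutex);
}
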
@@ -967,6 +1030,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
967 * 1030 *
968 * The MAC address is first known when the corresponding interface 1031 * The MAC address is first known when the corresponding interface
969 * is added. That is where we will initialize the hardware. 1032 * is added. That is where we will initialize the hardware.
1033 *
1034 * In addition, we currently have different firmwares for AP and managed
1035 * operation. We will know which to boot according to interface type.
970 */ 1036 */
971 1037
972 return 0; 1038 return 0;
@@ -1006,6 +1072,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1006 wl->bss_type = BSS_TYPE_IBSS; 1072 wl->bss_type = BSS_TYPE_IBSS;
1007 wl->set_bss_type = BSS_TYPE_STA_BSS; 1073 wl->set_bss_type = BSS_TYPE_STA_BSS;
1008 break; 1074 break;
1075 case NL80211_IFTYPE_AP:
1076 wl->bss_type = BSS_TYPE_AP_BSS;
1077 break;
1009 default: 1078 default:
1010 ret = -EOPNOTSUPP; 1079 ret = -EOPNOTSUPP;
1011 goto out; 1080 goto out;
@@ -1061,11 +1130,11 @@ power_off:
1061 1130
1062 wl->vif = vif; 1131 wl->vif = vif;
1063 wl->state = WL1271_STATE_ON; 1132 wl->state = WL1271_STATE_ON;
1064 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 1133 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
1065 1134
1066 /* update hw/fw version info in wiphy struct */ 1135 /* update hw/fw version info in wiphy struct */
1067 wiphy->hw_version = wl->chip.id; 1136 wiphy->hw_version = wl->chip.id;
1068 strncpy(wiphy->fw_version, wl->chip.fw_ver, 1137 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1069 sizeof(wiphy->fw_version)); 1138 sizeof(wiphy->fw_version));
1070 1139
1071 /* 1140 /*
@@ -1147,10 +1216,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1147 wl->time_offset = 0; 1216 wl->time_offset = 0;
1148 wl->session_counter = 0; 1217 wl->session_counter = 0;
1149 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 1218 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1150 wl->sta_rate_set = 0;
1151 wl->flags = 0; 1219 wl->flags = 0;
1152 wl->vif = NULL; 1220 wl->vif = NULL;
1153 wl->filters = 0; 1221 wl->filters = 0;
1222 wl1271_free_ap_keys(wl);
1223 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
1154 1224
1155 for (i = 0; i < NUM_TX_QUEUES; i++) 1225 for (i = 0; i < NUM_TX_QUEUES; i++)
1156 wl->tx_blocks_freed[i] = 0; 1226 wl->tx_blocks_freed[i] = 0;
@@ -1186,8 +1256,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1186 1256
1187static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters) 1257static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
1188{ 1258{
1189 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1259 wl1271_set_default_filters(wl);
1190 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1191 1260
1192 /* combine requested filters with current filter config */ 1261 /* combine requested filters with current filter config */
1193 filters = wl->filters | filters; 1262 filters = wl->filters | filters;
@@ -1322,25 +1391,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
1322 wl->basic_rate_set = wl->conf.tx.basic_rate_5; 1391 wl->basic_rate_set = wl->conf.tx.basic_rate_5;
1323} 1392}
1324 1393
1325static u32 wl1271_min_rate_get(struct wl1271 *wl) 1394static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
1326{
1327 int i;
1328 u32 rate = 0;
1329
1330 if (!wl->basic_rate_set) {
1331 WARN_ON(1);
1332 wl->basic_rate_set = wl->conf.tx.basic_rate;
1333 }
1334
1335 for (i = 0; !rate; i++) {
1336 if ((wl->basic_rate_set >> i) & 0x1)
1337 rate = 1 << i;
1338 }
1339
1340 return rate;
1341}
1342
1343static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
1344{ 1395{
1345 int ret; 1396 int ret;
1346 1397
@@ -1350,9 +1401,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
1350 if (ret < 0) 1401 if (ret < 0)
1351 goto out; 1402 goto out;
1352 } 1403 }
1353 wl->rate_set = wl1271_min_rate_get(wl); 1404 wl->rate_set = wl1271_tx_min_rate_get(wl);
1354 wl->sta_rate_set = 0; 1405 ret = wl1271_acx_sta_rate_policies(wl);
1355 ret = wl1271_acx_rate_policies(wl);
1356 if (ret < 0) 1406 if (ret < 0)
1357 goto out; 1407 goto out;
1358 ret = wl1271_acx_keep_alive_config( 1408 ret = wl1271_acx_keep_alive_config(
@@ -1381,14 +1431,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1381 struct wl1271 *wl = hw->priv; 1431 struct wl1271 *wl = hw->priv;
1382 struct ieee80211_conf *conf = &hw->conf; 1432 struct ieee80211_conf *conf = &hw->conf;
1383 int channel, ret = 0; 1433 int channel, ret = 0;
1434 bool is_ap;
1384 1435
1385 channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 1436 channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
1386 1437
1387 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s", 1438 wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
1439 " changed 0x%x",
1388 channel, 1440 channel,
1389 conf->flags & IEEE80211_CONF_PS ? "on" : "off", 1441 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
1390 conf->power_level, 1442 conf->power_level,
1391 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use"); 1443 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
1444 changed);
1392 1445
1393 /* 1446 /*
1394 * mac80211 will go to idle nearly immediately after transmitting some 1447 * mac80211 will go to idle nearly immediately after transmitting some
@@ -1406,6 +1459,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1406 goto out; 1459 goto out;
1407 } 1460 }
1408 1461
1462 is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
1463
1409 ret = wl1271_ps_elp_wakeup(wl, false); 1464 ret = wl1271_ps_elp_wakeup(wl, false);
1410 if (ret < 0) 1465 if (ret < 0)
1411 goto out; 1466 goto out;
@@ -1417,31 +1472,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1417 wl->band = conf->channel->band; 1472 wl->band = conf->channel->band;
1418 wl->channel = channel; 1473 wl->channel = channel;
1419 1474
1420 /* 1475 if (!is_ap) {
1421 * FIXME: the mac80211 should really provide a fixed rate 1476 /*
1422 * to use here. for now, just use the smallest possible rate 1477 * FIXME: the mac80211 should really provide a fixed
1423 * for the band as a fixed rate for association frames and 1478 * rate to use here. for now, just use the smallest
1424 * other control messages. 1479 * possible rate for the band as a fixed rate for
1425 */ 1480 * association frames and other control messages.
1426 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 1481 */
1427 wl1271_set_band_rate(wl); 1482 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1428 1483 wl1271_set_band_rate(wl);
1429 wl->basic_rate = wl1271_min_rate_get(wl);
1430 ret = wl1271_acx_rate_policies(wl);
1431 if (ret < 0)
1432 wl1271_warning("rate policy for update channel "
1433 "failed %d", ret);
1434 1484
1435 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { 1485 wl->basic_rate = wl1271_tx_min_rate_get(wl);
1436 ret = wl1271_join(wl, false); 1486 ret = wl1271_acx_sta_rate_policies(wl);
1437 if (ret < 0) 1487 if (ret < 0)
1438 wl1271_warning("cmd join to update channel " 1488 wl1271_warning("rate policy for channel "
1439 "failed %d", ret); 1489 "failed %d", ret);
1490
1491 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1492 ret = wl1271_join(wl, false);
1493 if (ret < 0)
1494 wl1271_warning("cmd join on channel "
1495 "failed %d", ret);
1496 }
1440 } 1497 }
1441 } 1498 }
1442 1499
1443 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1500 if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
1444 ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE); 1501 ret = wl1271_sta_handle_idle(wl,
1502 conf->flags & IEEE80211_CONF_IDLE);
1445 if (ret < 0) 1503 if (ret < 0)
1446 wl1271_warning("idle mode change failed %d", ret); 1504 wl1271_warning("idle mode change failed %d", ret);
1447 } 1505 }
@@ -1548,7 +1606,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1548 struct wl1271 *wl = hw->priv; 1606 struct wl1271 *wl = hw->priv;
1549 int ret; 1607 int ret;
1550 1608
1551 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); 1609 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
1610 " total %x", changed, *total);
1552 1611
1553 mutex_lock(&wl->mutex); 1612 mutex_lock(&wl->mutex);
1554 1613
@@ -1562,15 +1621,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1562 if (ret < 0) 1621 if (ret < 0)
1563 goto out; 1622 goto out;
1564 1623
1565 1624 if (wl->bss_type != BSS_TYPE_AP_BSS) {
1566 if (*total & FIF_ALLMULTI) 1625 if (*total & FIF_ALLMULTI)
1567 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); 1626 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
1568 else if (fp) 1627 else if (fp)
1569 ret = wl1271_acx_group_address_tbl(wl, fp->enabled, 1628 ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
1570 fp->mc_list, 1629 fp->mc_list,
1571 fp->mc_list_length); 1630 fp->mc_list_length);
1572 if (ret < 0) 1631 if (ret < 0)
1573 goto out_sleep; 1632 goto out_sleep;
1633 }
1574 1634
1575 /* determine, whether supported filter values have changed */ 1635 /* determine, whether supported filter values have changed */
1576 if (changed == 0) 1636 if (changed == 0)
@@ -1593,38 +1653,192 @@ out:
1593 kfree(fp); 1653 kfree(fp);
1594} 1654}
1595 1655
1656static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
1657 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
1658 u16 tx_seq_16)
1659{
1660 struct wl1271_ap_key *ap_key;
1661 int i;
1662
1663 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
1664
1665 if (key_size > MAX_KEY_SIZE)
1666 return -EINVAL;
1667
1668 /*
1669 * Find next free entry in ap_keys. Also check we are not replacing
1670 * an existing key.
1671 */
1672 for (i = 0; i < MAX_NUM_KEYS; i++) {
1673 if (wl->recorded_ap_keys[i] == NULL)
1674 break;
1675
1676 if (wl->recorded_ap_keys[i]->id == id) {
1677 wl1271_warning("trying to record key replacement");
1678 return -EINVAL;
1679 }
1680 }
1681
1682 if (i == MAX_NUM_KEYS)
1683 return -EBUSY;
1684
1685 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
1686 if (!ap_key)
1687 return -ENOMEM;
1688
1689 ap_key->id = id;
1690 ap_key->key_type = key_type;
1691 ap_key->key_size = key_size;
1692 memcpy(ap_key->key, key, key_size);
1693 ap_key->hlid = hlid;
1694 ap_key->tx_seq_32 = tx_seq_32;
1695 ap_key->tx_seq_16 = tx_seq_16;
1696
1697 wl->recorded_ap_keys[i] = ap_key;
1698 return 0;
1699}
1700
1701static void wl1271_free_ap_keys(struct wl1271 *wl)
1702{
1703 int i;
1704
1705 for (i = 0; i < MAX_NUM_KEYS; i++) {
1706 kfree(wl->recorded_ap_keys[i]);
1707 wl->recorded_ap_keys[i] = NULL;
1708 }
1709}
1710
1711static int wl1271_ap_init_hwenc(struct wl1271 *wl)
1712{
1713 int i, ret = 0;
1714 struct wl1271_ap_key *key;
1715 bool wep_key_added = false;
1716
1717 for (i = 0; i < MAX_NUM_KEYS; i++) {
1718 if (wl->recorded_ap_keys[i] == NULL)
1719 break;
1720
1721 key = wl->recorded_ap_keys[i];
1722 ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
1723 key->id, key->key_type,
1724 key->key_size, key->key,
1725 key->hlid, key->tx_seq_32,
1726 key->tx_seq_16);
1727 if (ret < 0)
1728 goto out;
1729
1730 if (key->key_type == KEY_WEP)
1731 wep_key_added = true;
1732 }
1733
1734 if (wep_key_added) {
1735 ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
1736 if (ret < 0)
1737 goto out;
1738 }
1739
1740out:
1741 wl1271_free_ap_keys(wl);
1742 return ret;
1743}
1744
1745static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
1746 u8 key_size, const u8 *key, u32 tx_seq_32,
1747 u16 tx_seq_16, struct ieee80211_sta *sta)
1748{
1749 int ret;
1750 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
1751
1752 if (is_ap) {
1753 struct wl1271_station *wl_sta;
1754 u8 hlid;
1755
1756 if (sta) {
1757 wl_sta = (struct wl1271_station *)sta->drv_priv;
1758 hlid = wl_sta->hlid;
1759 } else {
1760 hlid = WL1271_AP_BROADCAST_HLID;
1761 }
1762
1763 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
1764 /*
1765 * We do not support removing keys after AP shutdown.
1766 * Pretend we do to make mac80211 happy.
1767 */
1768 if (action != KEY_ADD_OR_REPLACE)
1769 return 0;
1770
1771 ret = wl1271_record_ap_key(wl, id,
1772 key_type, key_size,
1773 key, hlid, tx_seq_32,
1774 tx_seq_16);
1775 } else {
1776 ret = wl1271_cmd_set_ap_key(wl, action,
1777 id, key_type, key_size,
1778 key, hlid, tx_seq_32,
1779 tx_seq_16);
1780 }
1781
1782 if (ret < 0)
1783 return ret;
1784 } else {
1785 const u8 *addr;
1786 static const u8 bcast_addr[ETH_ALEN] = {
1787 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1788 };
1789
1790 addr = sta ? sta->addr : bcast_addr;
1791
1792 if (is_zero_ether_addr(addr)) {
1793 /* We don't support TX-only encryption */
1794 return -EOPNOTSUPP;
1795 }
1796
1797 /* The wl1271 does not allow removing unicast keys - they
1798 will be cleared automatically on the next CMD_JOIN. Ignore the
1799 request silently, as we don't want mac80211 to emit
1800 an error message. */
1801 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
1802 return 0;
1803
1804 ret = wl1271_cmd_set_sta_key(wl, action,
1805 id, key_type, key_size,
1806 key, addr, tx_seq_32,
1807 tx_seq_16);
1808 if (ret < 0)
1809 return ret;
1810
1811 /* the default WEP key needs to be configured at least once */
1812 if (key_type == KEY_WEP) {
1813 ret = wl1271_cmd_set_sta_default_wep_key(wl,
1814 wl->default_key);
1815 if (ret < 0)
1816 return ret;
1817 }
1818 }
1819
1820 return 0;
1821}
1822
1596static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1823static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1597 struct ieee80211_vif *vif, 1824 struct ieee80211_vif *vif,
1598 struct ieee80211_sta *sta, 1825 struct ieee80211_sta *sta,
1599 struct ieee80211_key_conf *key_conf) 1826 struct ieee80211_key_conf *key_conf)
1600{ 1827{
1601 struct wl1271 *wl = hw->priv; 1828 struct wl1271 *wl = hw->priv;
1602 const u8 *addr;
1603 int ret; 1829 int ret;
1604 u32 tx_seq_32 = 0; 1830 u32 tx_seq_32 = 0;
1605 u16 tx_seq_16 = 0; 1831 u16 tx_seq_16 = 0;
1606 u8 key_type; 1832 u8 key_type;
1607 1833
1608 static const u8 bcast_addr[ETH_ALEN] =
1609 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1610
1611 wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); 1834 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
1612 1835
1613 addr = sta ? sta->addr : bcast_addr; 1836 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
1614
1615 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
1616 wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
1617 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 1837 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
1618 key_conf->cipher, key_conf->keyidx, 1838 key_conf->cipher, key_conf->keyidx,
1619 key_conf->keylen, key_conf->flags); 1839 key_conf->keylen, key_conf->flags);
1620 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 1840 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
1621 1841
1622 if (is_zero_ether_addr(addr)) {
1623 /* We dont support TX only encryption */
1624 ret = -EOPNOTSUPP;
1625 goto out;
1626 }
1627
1628 mutex_lock(&wl->mutex); 1842 mutex_lock(&wl->mutex);
1629 1843
1630 if (unlikely(wl->state == WL1271_STATE_OFF)) { 1844 if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -1671,36 +1885,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1671 1885
1672 switch (cmd) { 1886 switch (cmd) {
1673 case SET_KEY: 1887 case SET_KEY:
1674 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, 1888 ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
1675 key_conf->keyidx, key_type, 1889 key_conf->keyidx, key_type,
1676 key_conf->keylen, key_conf->key, 1890 key_conf->keylen, key_conf->key,
1677 addr, tx_seq_32, tx_seq_16); 1891 tx_seq_32, tx_seq_16, sta);
1678 if (ret < 0) { 1892 if (ret < 0) {
1679 wl1271_error("Could not add or replace key"); 1893 wl1271_error("Could not add or replace key");
1680 goto out_sleep; 1894 goto out_sleep;
1681 } 1895 }
1682
1683 /* the default WEP key needs to be configured at least once */
1684 if (key_type == KEY_WEP) {
1685 ret = wl1271_cmd_set_default_wep_key(wl,
1686 wl->default_key);
1687 if (ret < 0)
1688 goto out_sleep;
1689 }
1690 break; 1896 break;
1691 1897
1692 case DISABLE_KEY: 1898 case DISABLE_KEY:
1693 /* The wl1271 does not allow to remove unicast keys - they 1899 ret = wl1271_set_key(wl, KEY_REMOVE,
1694 will be cleared automatically on next CMD_JOIN. Ignore the 1900 key_conf->keyidx, key_type,
1695 request silently, as we dont want the mac80211 to emit 1901 key_conf->keylen, key_conf->key,
1696 an error message. */ 1902 0, 0, sta);
1697 if (!is_broadcast_ether_addr(addr))
1698 break;
1699
1700 ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
1701 key_conf->keyidx, key_type,
1702 key_conf->keylen, key_conf->key,
1703 addr, 0, 0);
1704 if (ret < 0) { 1903 if (ret < 0) {
1705 wl1271_error("Could not remove key"); 1904 wl1271_error("Could not remove key");
1706 goto out_sleep; 1905 goto out_sleep;
@@ -1719,7 +1918,6 @@ out_sleep:
1719out_unlock: 1918out_unlock:
1720 mutex_unlock(&wl->mutex); 1919 mutex_unlock(&wl->mutex);
1721 1920
1722out:
1723 return ret; 1921 return ret;
1724} 1922}
1725 1923
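
In AP mode the new wl1271_set_key() path records keys that arrive before the BSS is started, and wl1271_ap_init_hwenc() replays them right after wl1271_cmd_start_bss() succeeds (see the bss_info_changed_ap hunk further down); keys set while the AP is already running go straight to the firmware. A condensed sketch of that decision, reusing the functions added above:

static int wl12xx_ap_add_key(struct wl1271 *wl, u8 id, u8 key_type,
			     u8 key_size, const u8 *key, u8 hlid)
{
	if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
		/* BSS not up yet: stash the key, replayed by ap_init_hwenc() */
		return wl1271_record_ap_key(wl, id, key_type, key_size,
					    key, hlid, 0, 0);

	/* AP already running: program the firmware directly */
	return wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE, id, key_type,
				     key_size, key, hlid, 0, 0);
}
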
@@ -1821,7 +2019,7 @@ out:
1821 return ret; 2019 return ret;
1822} 2020}
1823 2021
1824static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, 2022static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
1825 int offset) 2023 int offset)
1826{ 2024{
1827 u8 *ptr = skb->data + offset; 2025 u8 *ptr = skb->data + offset;
@@ -1831,89 +2029,211 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
1831 if (ptr[0] == WLAN_EID_SSID) { 2029 if (ptr[0] == WLAN_EID_SSID) {
1832 wl->ssid_len = ptr[1]; 2030 wl->ssid_len = ptr[1];
1833 memcpy(wl->ssid, ptr+2, wl->ssid_len); 2031 memcpy(wl->ssid, ptr+2, wl->ssid_len);
1834 return; 2032 return 0;
1835 } 2033 }
1836 ptr += (ptr[1] + 2); 2034 ptr += (ptr[1] + 2);
1837 } 2035 }
2036
1838 wl1271_error("No SSID in IEs!\n"); 2037 wl1271_error("No SSID in IEs!\n");
2038 return -ENOENT;
1839} 2039}
1840 2040
1841static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 2041static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
1842 struct ieee80211_vif *vif,
1843 struct ieee80211_bss_conf *bss_conf, 2042 struct ieee80211_bss_conf *bss_conf,
1844 u32 changed) 2043 u32 changed)
1845{ 2044{
1846 enum wl1271_cmd_ps_mode mode; 2045 int ret = 0;
1847 struct wl1271 *wl = hw->priv;
1848 struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
1849 bool do_join = false;
1850 bool set_assoc = false;
1851 int ret;
1852 2046
1853 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 2047 if (changed & BSS_CHANGED_ERP_SLOT) {
2048 if (bss_conf->use_short_slot)
2049 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
2050 else
2051 ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
2052 if (ret < 0) {
2053 wl1271_warning("Set slot time failed %d", ret);
2054 goto out;
2055 }
2056 }
1854 2057
1855 mutex_lock(&wl->mutex); 2058 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2059 if (bss_conf->use_short_preamble)
2060 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
2061 else
2062 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
2063 }
1856 2064
1857 if (unlikely(wl->state == WL1271_STATE_OFF)) 2065 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1858 goto out; 2066 if (bss_conf->use_cts_prot)
2067 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
2068 else
2069 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
2070 if (ret < 0) {
2071 wl1271_warning("Set ctsprotect failed %d", ret);
2072 goto out;
2073 }
2074 }
1859 2075
1860 ret = wl1271_ps_elp_wakeup(wl, false); 2076out:
1861 if (ret < 0) 2077 return ret;
1862 goto out; 2078}
1863 2079
1864 if ((changed & BSS_CHANGED_BEACON_INT) && 2080static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
1865 (wl->bss_type == BSS_TYPE_IBSS)) { 2081 struct ieee80211_vif *vif,
1866 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d", 2082 struct ieee80211_bss_conf *bss_conf,
2083 u32 changed)
2084{
2085 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
2086 int ret = 0;
2087
2088 if ((changed & BSS_CHANGED_BEACON_INT)) {
2089 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
1867 bss_conf->beacon_int); 2090 bss_conf->beacon_int);
1868 2091
1869 wl->beacon_int = bss_conf->beacon_int; 2092 wl->beacon_int = bss_conf->beacon_int;
1870 do_join = true;
1871 } 2093 }
1872 2094
1873 if ((changed & BSS_CHANGED_BEACON) && 2095 if ((changed & BSS_CHANGED_BEACON)) {
1874 (wl->bss_type == BSS_TYPE_IBSS)) { 2096 struct ieee80211_hdr *hdr;
1875 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2097 int ieoffset = offsetof(struct ieee80211_mgmt,
2098 u.beacon.variable);
2099 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
2100 u16 tmpl_id;
1876 2101
1877 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated"); 2102 if (!beacon)
2103 goto out;
1878 2104
1879 if (beacon) { 2105 wl1271_debug(DEBUG_MASTER, "beacon updated");
1880 struct ieee80211_hdr *hdr;
1881 int ieoffset = offsetof(struct ieee80211_mgmt,
1882 u.beacon.variable);
1883 2106
1884 wl1271_ssid_set(wl, beacon, ieoffset); 2107 ret = wl1271_ssid_set(wl, beacon, ieoffset);
2108 if (ret < 0) {
2109 dev_kfree_skb(beacon);
2110 goto out;
2111 }
2112 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
2113 CMD_TEMPL_BEACON;
2114 ret = wl1271_cmd_template_set(wl, tmpl_id,
2115 beacon->data,
2116 beacon->len, 0,
2117 wl1271_tx_min_rate_get(wl));
2118 if (ret < 0) {
2119 dev_kfree_skb(beacon);
2120 goto out;
2121 }
1885 2122
1886 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 2123 hdr = (struct ieee80211_hdr *) beacon->data;
1887 beacon->data, 2124 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1888 beacon->len, 0, 2125 IEEE80211_STYPE_PROBE_RESP);
1889 wl1271_min_rate_get(wl)); 2126
2127 tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
2128 CMD_TEMPL_PROBE_RESPONSE;
2129 ret = wl1271_cmd_template_set(wl,
2130 tmpl_id,
2131 beacon->data,
2132 beacon->len, 0,
2133 wl1271_tx_min_rate_get(wl));
2134 dev_kfree_skb(beacon);
2135 if (ret < 0)
2136 goto out;
2137 }
1890 2138
1891 if (ret < 0) { 2139out:
1892 dev_kfree_skb(beacon); 2140 return ret;
1893 goto out_sleep; 2141}
1894 }
1895 2142
1896 hdr = (struct ieee80211_hdr *) beacon->data; 2143/* AP mode changes */
1897 hdr->frame_control = cpu_to_le16( 2144static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
1898 IEEE80211_FTYPE_MGMT | 2145 struct ieee80211_vif *vif,
1899 IEEE80211_STYPE_PROBE_RESP); 2146 struct ieee80211_bss_conf *bss_conf,
2147 u32 changed)
2148{
2149 int ret = 0;
1900 2150
1901 ret = wl1271_cmd_template_set(wl, 2151 if ((changed & BSS_CHANGED_BASIC_RATES)) {
1902 CMD_TEMPL_PROBE_RESPONSE, 2152 u32 rates = bss_conf->basic_rates;
1903 beacon->data, 2153 struct conf_tx_rate_class mgmt_rc;
1904 beacon->len, 0, 2154
1905 wl1271_min_rate_get(wl)); 2155 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
1906 dev_kfree_skb(beacon); 2156 wl->basic_rate = wl1271_tx_min_rate_get(wl);
1907 if (ret < 0) 2157 wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
1908 goto out_sleep; 2158 wl->basic_rate_set);
2159
2160 /* update the AP management rate policy with the new rates */
2161 mgmt_rc.enabled_rates = wl->basic_rate_set;
2162 mgmt_rc.long_retry_limit = 10;
2163 mgmt_rc.short_retry_limit = 10;
2164 mgmt_rc.aflags = 0;
2165 ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
2166 ACX_TX_AP_MODE_MGMT_RATE);
2167 if (ret < 0) {
2168 wl1271_error("AP mgmt policy change failed %d", ret);
2169 goto out;
2170 }
2171 }
1909 2172
1910 /* Need to update the SSID (for filtering etc) */ 2173 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
1911 do_join = true; 2174 if (ret < 0)
2175 goto out;
2176
2177 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
2178 if (bss_conf->enable_beacon) {
2179 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
2180 ret = wl1271_cmd_start_bss(wl);
2181 if (ret < 0)
2182 goto out;
2183
2184 set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
2185 wl1271_debug(DEBUG_AP, "started AP");
2186
2187 ret = wl1271_ap_init_hwenc(wl);
2188 if (ret < 0)
2189 goto out;
2190 }
2191 } else {
2192 if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
2193 ret = wl1271_cmd_stop_bss(wl);
2194 if (ret < 0)
2195 goto out;
2196
2197 clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
2198 wl1271_debug(DEBUG_AP, "stopped AP");
2199 }
1912 } 2200 }
1913 } 2201 }
1914 2202
1915 if ((changed & BSS_CHANGED_BEACON_ENABLED) && 2203 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
1916 (wl->bss_type == BSS_TYPE_IBSS)) { 2204 if (ret < 0)
2205 goto out;
2206out:
2207 return;
2208}
2209
2210/* STA/IBSS mode changes */
2211static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
2212 struct ieee80211_vif *vif,
2213 struct ieee80211_bss_conf *bss_conf,
2214 u32 changed)
2215{
2216 bool do_join = false, set_assoc = false;
2217 bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
2218 u32 sta_rate_set = 0;
2219 int ret;
2220 struct ieee80211_sta *sta;
2221
2222 if (is_ibss) {
2223 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
2224 changed);
2225 if (ret < 0)
2226 goto out;
2227 }
2228
2229 if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss)
2230 do_join = true;
2231
2232 /* Need to update the SSID (for filtering etc) */
2233 if ((changed & BSS_CHANGED_BEACON) && is_ibss)
2234 do_join = true;
2235
2236 if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
1917 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", 2237 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
1918 bss_conf->enable_beacon ? "enabled" : "disabled"); 2238 bss_conf->enable_beacon ? "enabled" : "disabled");
1919 2239
@@ -1924,7 +2244,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1924 do_join = true; 2244 do_join = true;
1925 } 2245 }
1926 2246
1927 if (changed & BSS_CHANGED_CQM) { 2247 if ((changed & BSS_CHANGED_CQM)) {
1928 bool enable = false; 2248 bool enable = false;
1929 if (bss_conf->cqm_rssi_thold) 2249 if (bss_conf->cqm_rssi_thold)
1930 enable = true; 2250 enable = true;
@@ -1942,24 +2262,69 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1942 * and enable the BSSID filter 2262 * and enable the BSSID filter
1943 */ 2263 */
1944 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { 2264 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1945 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 2265 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1946 2266
2267 if (!is_zero_ether_addr(wl->bssid)) {
1947 ret = wl1271_cmd_build_null_data(wl); 2268 ret = wl1271_cmd_build_null_data(wl);
1948 if (ret < 0) 2269 if (ret < 0)
1949 goto out_sleep; 2270 goto out;
1950 2271
1951 ret = wl1271_build_qos_null_data(wl); 2272 ret = wl1271_build_qos_null_data(wl);
1952 if (ret < 0) 2273 if (ret < 0)
1953 goto out_sleep; 2274 goto out;
1954 2275
1955 /* filter out all packets not from this BSSID */ 2276 /* filter out all packets not from this BSSID */
1956 wl1271_configure_filters(wl, 0); 2277 wl1271_configure_filters(wl, 0);
1957 2278
1958 /* Need to update the BSSID (for filtering etc) */ 2279 /* Need to update the BSSID (for filtering etc) */
1959 do_join = true; 2280 do_join = true;
2281 }
1960 } 2282 }
1961 2283
1962 if (changed & BSS_CHANGED_ASSOC) { 2284 rcu_read_lock();
2285 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2286 if (sta) {
2287 /* save the supp_rates of the ap */
2288 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
2289 if (sta->ht_cap.ht_supported)
2290 sta_rate_set |=
2291 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
2292
2293 /* handle new association with HT and HT information change */
2294 if ((changed & BSS_CHANGED_HT) &&
2295 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
2296 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap,
2297 true);
2298 if (ret < 0) {
2299 wl1271_warning("Set ht cap true failed %d",
2300 ret);
2301 rcu_read_unlock();
2302 goto out;
2303 }
2304 ret = wl1271_acx_set_ht_information(wl,
2305 bss_conf->ht_operation_mode);
2306 if (ret < 0) {
2307 wl1271_warning("Set ht information failed %d",
2308 ret);
2309 rcu_read_unlock();
2310 goto out;
2311 }
2312 }
2313 /* handle new association without HT and disassociation */
2314 else if (changed & BSS_CHANGED_ASSOC) {
2315 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap,
2316 false);
2317 if (ret < 0) {
2318 wl1271_warning("Set ht cap false failed %d",
2319 ret);
2320 rcu_read_unlock();
2321 goto out;
2322 }
2323 }
2324 }
2325 rcu_read_unlock();
2326
2327 if ((changed & BSS_CHANGED_ASSOC)) {
1963 if (bss_conf->assoc) { 2328 if (bss_conf->assoc) {
1964 u32 rates; 2329 u32 rates;
1965 int ieoffset; 2330 int ieoffset;
@@ -1975,10 +2340,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1975 rates = bss_conf->basic_rates; 2340 rates = bss_conf->basic_rates;
1976 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, 2341 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
1977 rates); 2342 rates);
1978 wl->basic_rate = wl1271_min_rate_get(wl); 2343 wl->basic_rate = wl1271_tx_min_rate_get(wl);
1979 ret = wl1271_acx_rate_policies(wl); 2344 if (sta_rate_set)
2345 wl->rate_set = wl1271_tx_enabled_rates_get(wl,
2346 sta_rate_set);
2347 ret = wl1271_acx_sta_rate_policies(wl);
1980 if (ret < 0) 2348 if (ret < 0)
1981 goto out_sleep; 2349 goto out;
1982 2350
1983 /* 2351 /*
1984 * with wl1271, we don't need to update the 2352 * with wl1271, we don't need to update the
@@ -1988,7 +2356,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1988 */ 2356 */
1989 ret = wl1271_cmd_build_ps_poll(wl, wl->aid); 2357 ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
1990 if (ret < 0) 2358 if (ret < 0)
1991 goto out_sleep; 2359 goto out;
1992 2360
1993 /* 2361 /*
1994 * Get a template for hardware connection maintenance 2362 * Get a template for hardware connection maintenance
@@ -2002,17 +2370,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2002 /* enable the connection monitoring feature */ 2370 /* enable the connection monitoring feature */
2003 ret = wl1271_acx_conn_monit_params(wl, true); 2371 ret = wl1271_acx_conn_monit_params(wl, true);
2004 if (ret < 0) 2372 if (ret < 0)
2005 goto out_sleep; 2373 goto out;
2006 2374
2007 /* If we want to go in PSM but we're not there yet */ 2375 /* If we want to go in PSM but we're not there yet */
2008 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && 2376 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
2009 !test_bit(WL1271_FLAG_PSM, &wl->flags)) { 2377 !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
2378 enum wl1271_cmd_ps_mode mode;
2379
2010 mode = STATION_POWER_SAVE_MODE; 2380 mode = STATION_POWER_SAVE_MODE;
2011 ret = wl1271_ps_set_mode(wl, mode, 2381 ret = wl1271_ps_set_mode(wl, mode,
2012 wl->basic_rate, 2382 wl->basic_rate,
2013 true); 2383 true);
2014 if (ret < 0) 2384 if (ret < 0)
2015 goto out_sleep; 2385 goto out;
2016 } 2386 }
2017 } else { 2387 } else {
2018 /* use defaults when not associated */ 2388 /* use defaults when not associated */
@@ -2029,10 +2399,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2029 2399
2030 /* revert back to minimum rates for the current band */ 2400 /* revert back to minimum rates for the current band */
2031 wl1271_set_band_rate(wl); 2401 wl1271_set_band_rate(wl);
2032 wl->basic_rate = wl1271_min_rate_get(wl); 2402 wl->basic_rate = wl1271_tx_min_rate_get(wl);
2033 ret = wl1271_acx_rate_policies(wl); 2403 ret = wl1271_acx_sta_rate_policies(wl);
2034 if (ret < 0) 2404 if (ret < 0)
2035 goto out_sleep; 2405 goto out;
2036 2406
2037 /* disable connection monitor features */ 2407 /* disable connection monitor features */
2038 ret = wl1271_acx_conn_monit_params(wl, false); 2408 ret = wl1271_acx_conn_monit_params(wl, false);
@@ -2040,74 +2410,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2040 /* Disable the keep-alive feature */ 2410 /* Disable the keep-alive feature */
2041 ret = wl1271_acx_keep_alive_mode(wl, false); 2411 ret = wl1271_acx_keep_alive_mode(wl, false);
2042 if (ret < 0) 2412 if (ret < 0)
2043 goto out_sleep; 2413 goto out;
2044 2414
2045 /* restore the bssid filter and go to dummy bssid */ 2415 /* restore the bssid filter and go to dummy bssid */
2046 wl1271_unjoin(wl); 2416 wl1271_unjoin(wl);
2047 wl1271_dummy_join(wl); 2417 wl1271_dummy_join(wl);
2048 } 2418 }
2049
2050 } 2419 }
2051 2420
2052 if (changed & BSS_CHANGED_ERP_SLOT) { 2421 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
2053 if (bss_conf->use_short_slot) 2422 if (ret < 0)
2054 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); 2423 goto out;
2055 else
2056 ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
2057 if (ret < 0) {
2058 wl1271_warning("Set slot time failed %d", ret);
2059 goto out_sleep;
2060 }
2061 }
2062
2063 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2064 if (bss_conf->use_short_preamble)
2065 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
2066 else
2067 wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
2068 }
2069
2070 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2071 if (bss_conf->use_cts_prot)
2072 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
2073 else
2074 ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
2075 if (ret < 0) {
2076 wl1271_warning("Set ctsprotect failed %d", ret);
2077 goto out_sleep;
2078 }
2079 }
2080
2081 /*
2082 * Takes care of: New association with HT enable,
2083 * HT information change in beacon.
2084 */
2085 if (sta &&
2086 (changed & BSS_CHANGED_HT) &&
2087 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
2088 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
2089 if (ret < 0) {
2090 wl1271_warning("Set ht cap true failed %d", ret);
2091 goto out_sleep;
2092 }
2093 ret = wl1271_acx_set_ht_information(wl,
2094 bss_conf->ht_operation_mode);
2095 if (ret < 0) {
2096 wl1271_warning("Set ht information failed %d", ret);
2097 goto out_sleep;
2098 }
2099 }
2100 /*
2101 * Takes care of: New association without HT,
2102 * Disassociation.
2103 */
2104 else if (sta && (changed & BSS_CHANGED_ASSOC)) {
2105 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
2106 if (ret < 0) {
2107 wl1271_warning("Set ht cap false failed %d", ret);
2108 goto out_sleep;
2109 }
2110 }
2111 2424
2112 if (changed & BSS_CHANGED_ARP_FILTER) { 2425 if (changed & BSS_CHANGED_ARP_FILTER) {
2113 __be32 addr = bss_conf->arp_addr_list[0]; 2426 __be32 addr = bss_conf->arp_addr_list[0];
@@ -2124,76 +2437,128 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2124 ret = wl1271_cmd_build_arp_rsp(wl, addr); 2437 ret = wl1271_cmd_build_arp_rsp(wl, addr);
2125 if (ret < 0) { 2438 if (ret < 0) {
2126 wl1271_warning("build arp rsp failed: %d", ret); 2439 wl1271_warning("build arp rsp failed: %d", ret);
2127 goto out_sleep; 2440 goto out;
2128 } 2441 }
2129 2442
2130 ret = wl1271_acx_arp_ip_filter(wl, 2443 ret = wl1271_acx_arp_ip_filter(wl,
2131 (ACX_ARP_FILTER_ARP_FILTERING | 2444 ACX_ARP_FILTER_ARP_FILTERING,
2132 ACX_ARP_FILTER_AUTO_ARP),
2133 addr); 2445 addr);
2134 } else 2446 } else
2135 ret = wl1271_acx_arp_ip_filter(wl, 0, addr); 2447 ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
2136 2448
2137 if (ret < 0) 2449 if (ret < 0)
2138 goto out_sleep; 2450 goto out;
2139 } 2451 }
2140 2452
2141 if (do_join) { 2453 if (do_join) {
2142 ret = wl1271_join(wl, set_assoc); 2454 ret = wl1271_join(wl, set_assoc);
2143 if (ret < 0) { 2455 if (ret < 0) {
2144 wl1271_warning("cmd join failed %d", ret); 2456 wl1271_warning("cmd join failed %d", ret);
2145 goto out_sleep; 2457 goto out;
2146 } 2458 }
2147 } 2459 }
2148 2460
2149out_sleep:
2150 wl1271_ps_elp_sleep(wl);
2151
2152out: 2461out:
2153 mutex_unlock(&wl->mutex); 2462 return;
2154} 2463}
2155 2464
2156static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue, 2465static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
2157 const struct ieee80211_tx_queue_params *params) 2466 struct ieee80211_vif *vif,
2467 struct ieee80211_bss_conf *bss_conf,
2468 u32 changed)
2158{ 2469{
2159 struct wl1271 *wl = hw->priv; 2470 struct wl1271 *wl = hw->priv;
2160 u8 ps_scheme; 2471 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
2161 int ret; 2472 int ret;
2162 2473
2163 mutex_lock(&wl->mutex); 2474 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
2475 (int)changed);
2164 2476
2165 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); 2477 mutex_lock(&wl->mutex);
2166 2478
2167 if (unlikely(wl->state == WL1271_STATE_OFF)) { 2479 if (unlikely(wl->state == WL1271_STATE_OFF))
2168 ret = -EAGAIN;
2169 goto out; 2480 goto out;
2170 }
2171 2481
2172 ret = wl1271_ps_elp_wakeup(wl, false); 2482 ret = wl1271_ps_elp_wakeup(wl, false);
2173 if (ret < 0) 2483 if (ret < 0)
2174 goto out; 2484 goto out;
2175 2485
2176 /* the txop is confed in units of 32us by the mac80211, we need us */ 2486 if (is_ap)
2177 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), 2487 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
2178 params->cw_min, params->cw_max, 2488 else
2179 params->aifs, params->txop << 5); 2489 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
2180 if (ret < 0) 2490
2181 goto out_sleep; 2491 wl1271_ps_elp_sleep(wl);
2492
2493out:
2494 mutex_unlock(&wl->mutex);
2495}
2496
2497static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
2498 const struct ieee80211_tx_queue_params *params)
2499{
2500 struct wl1271 *wl = hw->priv;
2501 u8 ps_scheme;
2502 int ret = 0;
2503
2504 mutex_lock(&wl->mutex);
2505
2506 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
2182 2507
2183 if (params->uapsd) 2508 if (params->uapsd)
2184 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER; 2509 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
2185 else 2510 else
2186 ps_scheme = CONF_PS_SCHEME_LEGACY; 2511 ps_scheme = CONF_PS_SCHEME_LEGACY;
2187 2512
2188 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), 2513 if (wl->state == WL1271_STATE_OFF) {
2189 CONF_CHANNEL_TYPE_EDCF, 2514 /*
2190 wl1271_tx_get_queue(queue), 2515 * If the state is off, the parameters will be recorded and
2191 ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0); 2516 * configured on init. This happens in AP-mode.
2192 if (ret < 0) 2517 */
2193 goto out_sleep; 2518 struct conf_tx_ac_category *conf_ac =
2519 &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
2520 struct conf_tx_tid *conf_tid =
2521 &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
2522
2523 conf_ac->ac = wl1271_tx_get_queue(queue);
2524 conf_ac->cw_min = (u8)params->cw_min;
2525 conf_ac->cw_max = params->cw_max;
2526 conf_ac->aifsn = params->aifs;
2527 conf_ac->tx_op_limit = params->txop << 5;
2528
2529 conf_tid->queue_id = wl1271_tx_get_queue(queue);
2530 conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
2531 conf_tid->tsid = wl1271_tx_get_queue(queue);
2532 conf_tid->ps_scheme = ps_scheme;
2533 conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
2534 conf_tid->apsd_conf[0] = 0;
2535 conf_tid->apsd_conf[1] = 0;
2536 } else {
2537 ret = wl1271_ps_elp_wakeup(wl, false);
2538 if (ret < 0)
2539 goto out;
2540
2541 /*
2542 * mac80211 configures the txop in units of 32us,
2543 * but the firmware needs it in microseconds
2544 */
2545 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
2546 params->cw_min, params->cw_max,
2547 params->aifs, params->txop << 5);
2548 if (ret < 0)
2549 goto out_sleep;
2550
2551 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
2552 CONF_CHANNEL_TYPE_EDCF,
2553 wl1271_tx_get_queue(queue),
2554 ps_scheme, CONF_ACK_POLICY_LEGACY,
2555 0, 0);
2556 if (ret < 0)
2557 goto out_sleep;
2194 2558
2195out_sleep: 2559out_sleep:
2196 wl1271_ps_elp_sleep(wl); 2560 wl1271_ps_elp_sleep(wl);
2561 }
2197 2562
2198out: 2563out:
2199 mutex_unlock(&wl->mutex); 2564 mutex_unlock(&wl->mutex);
@@ -2247,6 +2612,173 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
2247 return 0; 2612 return 0;
2248} 2613}
2249 2614
2615static int wl1271_allocate_hlid(struct wl1271 *wl,
2616 struct ieee80211_sta *sta,
2617 u8 *hlid)
2618{
2619 struct wl1271_station *wl_sta;
2620 int id;
2621
2622 id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
2623 if (id >= AP_MAX_STATIONS) {
2624 wl1271_warning("could not allocate HLID - too many stations");
2625 return -EBUSY;
2626 }
2627
2628 wl_sta = (struct wl1271_station *)sta->drv_priv;
2629
2630 __set_bit(id, wl->ap_hlid_map);
2631 wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
2632 *hlid = wl_sta->hlid;
2633 return 0;
2634}
2635
2636static void wl1271_free_hlid(struct wl1271 *wl, u8 hlid)
2637{
2638 int id = hlid - WL1271_AP_STA_HLID_START;
2639
2640 __clear_bit(id, wl->ap_hlid_map);
2641}
2642
2643static int wl1271_op_sta_add(struct ieee80211_hw *hw,
2644 struct ieee80211_vif *vif,
2645 struct ieee80211_sta *sta)
2646{
2647 struct wl1271 *wl = hw->priv;
2648 int ret = 0;
2649 u8 hlid;
2650
2651 mutex_lock(&wl->mutex);
2652
2653 if (unlikely(wl->state == WL1271_STATE_OFF))
2654 goto out;
2655
2656 if (wl->bss_type != BSS_TYPE_AP_BSS)
2657 goto out;
2658
2659 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
2660
2661 ret = wl1271_allocate_hlid(wl, sta, &hlid);
2662 if (ret < 0)
2663 goto out;
2664
2665 ret = wl1271_ps_elp_wakeup(wl, false);
2666 if (ret < 0)
2667 goto out;
2668
2669 ret = wl1271_cmd_add_sta(wl, sta, hlid);
2670 if (ret < 0)
2671 goto out_sleep;
2672
2673out_sleep:
2674 wl1271_ps_elp_sleep(wl);
2675
2676out:
2677 mutex_unlock(&wl->mutex);
2678 return ret;
2679}
2680
2681static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
2682 struct ieee80211_vif *vif,
2683 struct ieee80211_sta *sta)
2684{
2685 struct wl1271 *wl = hw->priv;
2686 struct wl1271_station *wl_sta;
2687 int ret = 0, id;
2688
2689 mutex_lock(&wl->mutex);
2690
2691 if (unlikely(wl->state == WL1271_STATE_OFF))
2692 goto out;
2693
2694 if (wl->bss_type != BSS_TYPE_AP_BSS)
2695 goto out;
2696
2697 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
2698
2699 wl_sta = (struct wl1271_station *)sta->drv_priv;
2700 id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
2701 if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
2702 goto out;
2703
2704 ret = wl1271_ps_elp_wakeup(wl, false);
2705 if (ret < 0)
2706 goto out;
2707
2708 ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
2709 if (ret < 0)
2710 goto out_sleep;
2711
2712 wl1271_free_hlid(wl, wl_sta->hlid);
2713
2714out_sleep:
2715 wl1271_ps_elp_sleep(wl);
2716
2717out:
2718 mutex_unlock(&wl->mutex);
2719 return ret;
2720}
2721
2722int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2723 enum ieee80211_ampdu_mlme_action action,
2724 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2725 u8 buf_size)
2726{
2727 struct wl1271 *wl = hw->priv;
2728 int ret;
2729
2730 mutex_lock(&wl->mutex);
2731
2732 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2733 ret = -EAGAIN;
2734 goto out;
2735 }
2736
2737 ret = wl1271_ps_elp_wakeup(wl, false);
2738 if (ret < 0)
2739 goto out;
2740
2741 switch (action) {
2742 case IEEE80211_AMPDU_RX_START:
2743 if (wl->ba_support) {
2744 ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
2745 true);
2746 if (!ret)
2747 wl->ba_rx_bitmap |= BIT(tid);
2748 } else {
2749 ret = -ENOTSUPP;
2750 }
2751 break;
2752
2753 case IEEE80211_AMPDU_RX_STOP:
2754 ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
2755 if (!ret)
2756 wl->ba_rx_bitmap &= ~BIT(tid);
2757 break;
2758
2759 /*
2760 * The BA initiator session is managed by the FW independently.
2761 * Deliberately fall through here for all TX AMPDU actions.
2762 */
2763 case IEEE80211_AMPDU_TX_START:
2764 case IEEE80211_AMPDU_TX_STOP:
2765 case IEEE80211_AMPDU_TX_OPERATIONAL:
2766 ret = -EINVAL;
2767 break;
2768
2769 default:
2770 wl1271_error("Incorrect ampdu action id=%x\n", action);
2771 ret = -EINVAL;
2772 }
2773
2774 wl1271_ps_elp_sleep(wl);
2775
2776out:
2777 mutex_unlock(&wl->mutex);
2778
2779 return ret;
2780}
2781
2250/* can't be const, mac80211 writes to this */ 2782/* can't be const, mac80211 writes to this */
2251static struct ieee80211_rate wl1271_rates[] = { 2783static struct ieee80211_rate wl1271_rates[] = {
2252 { .bitrate = 10, 2784 { .bitrate = 10,
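A standalone illustration (not part of the patch) of the per-TID bookkeeping the ampdu_action handler above performs with BIT(tid) on ba_rx_bitmap; the names below are mock ones for the sketch only.

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the RX BA session tracking in wl1271_op_ampdu_action (tid < 8). */
static uint8_t ba_rx_bitmap;

static void ba_rx_session_started(unsigned int tid)
{
	ba_rx_bitmap |= 1u << tid;	/* RX_START succeeded for this TID */
}

static void ba_rx_session_stopped(unsigned int tid)
{
	ba_rx_bitmap &= ~(1u << tid);	/* cleared again on RX_STOP */
}

static bool ba_rx_session_active(unsigned int tid)
{
	return ba_rx_bitmap & (1u << tid);
}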
@@ -2305,6 +2837,7 @@ static struct ieee80211_channel wl1271_channels[] = {
2305 { .hw_value = 11, .center_freq = 2462, .max_power = 25 }, 2837 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2306 { .hw_value = 12, .center_freq = 2467, .max_power = 25 }, 2838 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2307 { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, 2839 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
2840 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
2308}; 2841};
2309 2842
2310/* mapping to indexes for wl1271_rates */ 2843/* mapping to indexes for wl1271_rates */
@@ -2493,6 +3026,9 @@ static const struct ieee80211_ops wl1271_ops = {
2493 .conf_tx = wl1271_op_conf_tx, 3026 .conf_tx = wl1271_op_conf_tx,
2494 .get_tsf = wl1271_op_get_tsf, 3027 .get_tsf = wl1271_op_get_tsf,
2495 .get_survey = wl1271_op_get_survey, 3028 .get_survey = wl1271_op_get_survey,
3029 .sta_add = wl1271_op_sta_add,
3030 .sta_remove = wl1271_op_sta_remove,
3031 .ampdu_action = wl1271_op_ampdu_action,
2496 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 3032 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
2497}; 3033};
2498 3034
@@ -2607,6 +3143,18 @@ int wl1271_register_hw(struct wl1271 *wl)
2607 if (wl->mac80211_registered) 3143 if (wl->mac80211_registered)
2608 return 0; 3144 return 0;
2609 3145
3146 ret = wl1271_fetch_nvs(wl);
3147 if (ret == 0) {
3148 u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
3149
3150 wl->mac_addr[0] = nvs_ptr[11];
3151 wl->mac_addr[1] = nvs_ptr[10];
3152 wl->mac_addr[2] = nvs_ptr[6];
3153 wl->mac_addr[3] = nvs_ptr[5];
3154 wl->mac_addr[4] = nvs_ptr[4];
3155 wl->mac_addr[5] = nvs_ptr[3];
3156 }
3157
2610 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); 3158 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
2611 3159
2612 ret = ieee80211_register_hw(wl->hw); 3160 ret = ieee80211_register_hw(wl->hw);
@@ -2629,6 +3177,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
2629 3177
2630void wl1271_unregister_hw(struct wl1271 *wl) 3178void wl1271_unregister_hw(struct wl1271 *wl)
2631{ 3179{
3180 if (wl->state == WL1271_STATE_PLT)
3181 __wl1271_plt_stop(wl);
3182
2632 unregister_netdevice_notifier(&wl1271_dev_notifier); 3183 unregister_netdevice_notifier(&wl1271_dev_notifier);
2633 ieee80211_unregister_hw(wl->hw); 3184 ieee80211_unregister_hw(wl->hw);
2634 wl->mac80211_registered = false; 3185 wl->mac80211_registered = false;
@@ -2667,7 +3218,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2667 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 3218 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
2668 3219
2669 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 3220 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2670 BIT(NL80211_IFTYPE_ADHOC); 3221 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
2671 wl->hw->wiphy->max_scan_ssids = 1; 3222 wl->hw->wiphy->max_scan_ssids = 1;
2672 /* 3223 /*
2673 * Maximum length of elements in scanning probe request templates 3224 * Maximum length of elements in scanning probe request templates
@@ -2676,8 +3227,20 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2676 */ 3227 */
2677 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 3228 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
2678 sizeof(struct ieee80211_header); 3229 sizeof(struct ieee80211_header);
2679 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 3230
2680 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz; 3231 /*
3232 * We keep local copies of the band structs because we need to
3233 * modify them on a per-device basis.
3234 */
3235 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
3236 sizeof(wl1271_band_2ghz));
3237 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
3238 sizeof(wl1271_band_5ghz));
3239
3240 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3241 &wl->bands[IEEE80211_BAND_2GHZ];
3242 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
3243 &wl->bands[IEEE80211_BAND_5GHZ];
2681 3244
2682 wl->hw->queues = 4; 3245 wl->hw->queues = 4;
2683 wl->hw->max_rates = 1; 3246 wl->hw->max_rates = 1;
@@ -2686,6 +3249,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2686 3249
2687 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); 3250 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
2688 3251
3252 wl->hw->sta_data_size = sizeof(struct wl1271_station);
3253
3254 wl->hw->max_rx_aggregation_subframes = 8;
3255
2689 return 0; 3256 return 0;
2690} 3257}
2691EXPORT_SYMBOL_GPL(wl1271_init_ieee80211); 3258EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2735,19 +3302,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2735 wl->beacon_int = WL1271_DEFAULT_BEACON_INT; 3302 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
2736 wl->default_key = 0; 3303 wl->default_key = 0;
2737 wl->rx_counter = 0; 3304 wl->rx_counter = 0;
2738 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 3305 wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
2739 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 3306 wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
2740 wl->psm_entry_retry = 0; 3307 wl->psm_entry_retry = 0;
2741 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 3308 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2742 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 3309 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2743 wl->basic_rate = CONF_TX_RATE_MASK_BASIC; 3310 wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
2744 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 3311 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
2745 wl->sta_rate_set = 0;
2746 wl->band = IEEE80211_BAND_2GHZ; 3312 wl->band = IEEE80211_BAND_2GHZ;
2747 wl->vif = NULL; 3313 wl->vif = NULL;
2748 wl->flags = 0; 3314 wl->flags = 0;
2749 wl->sg_enabled = true; 3315 wl->sg_enabled = true;
2750 wl->hw_pg_ver = -1; 3316 wl->hw_pg_ver = -1;
3317 wl->bss_type = MAX_BSS_TYPE;
3318 wl->set_bss_type = MAX_BSS_TYPE;
3319 wl->fw_bss_type = MAX_BSS_TYPE;
2751 3320
2752 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 3321 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
2753 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 3322 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -2837,9 +3406,9 @@ int wl1271_free_hw(struct wl1271 *wl)
2837} 3406}
2838EXPORT_SYMBOL_GPL(wl1271_free_hw); 3407EXPORT_SYMBOL_GPL(wl1271_free_hw);
2839 3408
2840u32 wl12xx_debug_level; 3409u32 wl12xx_debug_level = DEBUG_NONE;
2841EXPORT_SYMBOL_GPL(wl12xx_debug_level); 3410EXPORT_SYMBOL_GPL(wl12xx_debug_level);
2842module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE); 3411module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
2843MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); 3412MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
2844 3413
2845MODULE_LICENSE("GPL"); 3414MODULE_LICENSE("GPL");
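A standalone sketch (not part of the patch) of the HLID allocation scheme used by wl1271_allocate_hlid/wl1271_free_hlid above: station slots come from a small bitmap and the resulting HLID is offset so IDs 0 and 1 stay reserved for the global and broadcast links. Plain C bit operations stand in for the kernel's find_first_zero_bit helpers; the constants mirror those added to wl12xx.h.

#include <stdint.h>

#define AP_MAX_STATIONS		5
#define AP_STA_HLID_START	2	/* HLIDs 0/1 = global/broadcast links */

static uint32_t hlid_map;		/* one bit per station slot */

/* Returns an HLID >= AP_STA_HLID_START, or -1 when all slots are taken. */
static int hlid_alloc(void)
{
	for (int id = 0; id < AP_MAX_STATIONS; id++) {
		if (!(hlid_map & (1u << id))) {
			hlid_map |= 1u << id;
			return AP_STA_HLID_START + id;
		}
	}
	return -1;
}

static void hlid_free(int hlid)
{
	hlid_map &= ~(1u << (hlid - AP_STA_HLID_START));
}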
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 60a3738eadb..2d3086ae633 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -139,8 +139,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
139 return ret; 139 return ret;
140 } 140 }
141 141
142 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, 142 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
143 rates, send);
144 if (ret < 0) 143 if (ret < 0)
145 return ret; 144 return ret;
146 145
@@ -163,8 +162,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
163 if (ret < 0) 162 if (ret < 0)
164 return ret; 163 return ret;
165 164
166 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, 165 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
167 rates, send);
168 if (ret < 0) 166 if (ret < 0)
169 return ret; 167 return ret;
170 168
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 682304c30b8..00d250d8da1 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -29,14 +29,14 @@
29#include "rx.h" 29#include "rx.h"
30#include "io.h" 30#include "io.h"
31 31
32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
33 u32 drv_rx_counter) 33 u32 drv_rx_counter)
34{ 34{
35 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 35 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
36 RX_MEM_BLOCK_MASK; 36 RX_MEM_BLOCK_MASK;
37} 37}
38 38
39static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, 39static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
40 u32 drv_rx_counter) 40 u32 drv_rx_counter)
41{ 41{
42 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 42 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
@@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
76 */ 76 */
77 wl->noise = desc->rssi - (desc->snr >> 1); 77 wl->noise = desc->rssi - (desc->snr >> 1);
78 78
79 status->freq = ieee80211_channel_to_frequency(desc->channel); 79 status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band);
80 80
81 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 81 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
82 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 82 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -134,7 +134,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
134 return 0; 134 return 0;
135} 135}
136 136
137void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) 137void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
138{ 138{
139 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; 139 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
140 u32 buf_size; 140 u32 buf_size;
@@ -198,6 +198,16 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
198 pkt_offset += pkt_length; 198 pkt_offset += pkt_length;
199 } 199 }
200 } 200 }
201 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, 201 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
202 cpu_to_le32(wl->rx_counter)); 202}
203
204void wl1271_set_default_filters(struct wl1271 *wl)
205{
206 if (wl->bss_type == BSS_TYPE_AP_BSS) {
207 wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
208 wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
209 } else {
210 wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
211 wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
212 }
203} 213}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 3abb26fe036..4cef8fa3dee 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -86,8 +86,9 @@
86/* 86/*
87 * RX Descriptor status 87 * RX Descriptor status
88 * 88 *
89 * Bits 0-2 - status 89 * Bits 0-2 - error code
90 * Bits 3-7 - reserved 90 * Bits 3-5 - process_id tag (AP mode FW)
91 * Bits 6-7 - reserved
91 */ 92 */
92#define WL1271_RX_DESC_STATUS_MASK 0x07 93#define WL1271_RX_DESC_STATUS_MASK 0x07
93 94
@@ -110,12 +111,16 @@ struct wl1271_rx_descriptor {
110 u8 snr; 111 u8 snr;
111 __le32 timestamp; 112 __le32 timestamp;
112 u8 packet_class; 113 u8 packet_class;
113 u8 process_id; 114 union {
115 u8 process_id; /* STA FW */
116 u8 hlid; /* AP FW */
117 } __packed;
114 u8 pad_len; 118 u8 pad_len;
115 u8 reserved; 119 u8 reserved;
116} __packed; 120} __packed;
117 121
118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 122void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
119u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 123u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
124void wl1271_set_default_filters(struct wl1271 *wl);
120 125
121#endif 126#endif
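A standalone sketch (not part of the patch) showing why the rx.h change is layout-neutral: the anonymous union lets the same descriptor byte carry either the STA firmware's process_id or the AP firmware's hlid without changing the structure size. The types below are mock ones, not the driver's.

#include <stdint.h>

struct desc_tail_old {
	uint8_t packet_class;
	uint8_t process_id;
	uint8_t pad_len;
	uint8_t reserved;
} __attribute__((packed));

struct desc_tail_new {
	uint8_t packet_class;
	union {
		uint8_t process_id;	/* STA firmware */
		uint8_t hlid;		/* AP firmware */
	} __attribute__((packed));
	uint8_t pad_len;
	uint8_t reserved;
} __attribute__((packed));

_Static_assert(sizeof(struct desc_tail_old) == sizeof(struct desc_tail_new),
	       "the union must not change the descriptor layout");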
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 93cbb8d5aba..d5e87482506 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -345,3 +345,4 @@ MODULE_LICENSE("GPL");
345MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 345MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
346MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 346MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
347MODULE_FIRMWARE(WL1271_FW_NAME); 347MODULE_FIRMWARE(WL1271_FW_NAME);
348MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 7145ea54378..0132dad756c 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,6 +110,7 @@ static void wl1271_spi_reset(struct wl1271 *wl)
110 spi_message_add_tail(&t, &m); 110 spi_message_add_tail(&t, &m);
111 111
112 spi_sync(wl_to_spi(wl), &m); 112 spi_sync(wl_to_spi(wl), &m);
113
113 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); 114 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
114 kfree(cmd); 115 kfree(cmd);
115} 116}
@@ -494,4 +495,5 @@ MODULE_LICENSE("GPL");
494MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 495MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
495MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 496MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
496MODULE_FIRMWARE(WL1271_FW_NAME); 497MODULE_FIRMWARE(WL1271_FW_NAME);
498MODULE_FIRMWARE(WL1271_AP_FW_NAME);
497MODULE_ALIAS("spi:wl1271"); 499MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index b44c75cd8c1..67a00946e3d 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/etherdevice.h>
26 27
27#include "wl12xx.h" 28#include "wl12xx.h"
28#include "io.h" 29#include "io.h"
@@ -30,6 +31,23 @@
30#include "ps.h" 31#include "ps.h"
31#include "tx.h" 32#include "tx.h"
32 33
34static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
35{
36 int ret;
37 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
38
39 if (is_ap)
40 ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
41 else
42 ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
43
44 if (ret < 0)
45 return ret;
46
47 wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
48 return 0;
49}
50
33static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) 51static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
34{ 52{
35 int id; 53 int id;
@@ -99,7 +117,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
99{ 117{
100 struct timespec ts; 118 struct timespec ts;
101 struct wl1271_tx_hw_descr *desc; 119 struct wl1271_tx_hw_descr *desc;
102 int pad, ac; 120 int pad, ac, rate_idx;
103 s64 hosttime; 121 s64 hosttime;
104 u16 tx_attr; 122 u16 tx_attr;
105 123
@@ -117,7 +135,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
117 getnstimeofday(&ts); 135 getnstimeofday(&ts);
118 hosttime = (timespec_to_ns(&ts) >> 10); 136 hosttime = (timespec_to_ns(&ts) >> 10);
119 desc->start_time = cpu_to_le32(hosttime - wl->time_offset); 137 desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
120 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); 138
139 if (wl->bss_type != BSS_TYPE_AP_BSS)
140 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
141 else
142 desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
121 143
122 /* configure the tx attributes */ 144 /* configure the tx attributes */
123 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 145 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
@@ -125,7 +147,41 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
125 /* queue (we use same identifiers for tid's and ac's */ 147 /* queue (we use same identifiers for tid's and ac's */
126 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 148 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
127 desc->tid = ac; 149 desc->tid = ac;
128 desc->aid = TX_HW_DEFAULT_AID; 150
151 if (wl->bss_type != BSS_TYPE_AP_BSS) {
152 desc->aid = TX_HW_DEFAULT_AID;
153
154 /* if the packets are destined for AP (have a STA entry)
155 send them with AP rate policies, otherwise use default
156 basic rates */
157 if (control->control.sta)
158 rate_idx = ACX_TX_AP_FULL_RATE;
159 else
160 rate_idx = ACX_TX_BASIC_RATE;
161 } else {
162 if (control->control.sta) {
163 struct wl1271_station *wl_sta;
164
165 wl_sta = (struct wl1271_station *)
166 control->control.sta->drv_priv;
167 desc->hlid = wl_sta->hlid;
168 rate_idx = ac;
169 } else {
170 struct ieee80211_hdr *hdr;
171
172 hdr = (struct ieee80211_hdr *)
173 (skb->data + sizeof(*desc));
174 if (ieee80211_is_mgmt(hdr->frame_control)) {
175 desc->hlid = WL1271_AP_GLOBAL_HLID;
176 rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
177 } else {
178 desc->hlid = WL1271_AP_BROADCAST_HLID;
179 rate_idx = ACX_TX_AP_MODE_BCST_RATE;
180 }
181 }
182 }
183
184 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
129 desc->reserved = 0; 185 desc->reserved = 0;
130 186
131 /* align the length (and store in terms of words) */ 187 /* align the length (and store in terms of words) */
@@ -136,14 +192,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
136 pad = pad - skb->len; 192 pad = pad - skb->len;
137 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 193 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
138 194
139 /* if the packets are destined for AP (have a STA entry) send them
140 with AP rate policies, otherwise use default basic rates */
141 if (control->control.sta)
142 tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
143
144 desc->tx_attr = cpu_to_le16(tx_attr); 195 desc->tx_attr = cpu_to_le16(tx_attr);
145 196
146 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 197 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
198 "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
199 le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
200 le16_to_cpu(desc->life_time), desc->total_mem_blocks);
147} 201}
148 202
149/* caller must hold wl->mutex */ 203/* caller must hold wl->mutex */
@@ -153,7 +207,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
153 struct ieee80211_tx_info *info; 207 struct ieee80211_tx_info *info;
154 u32 extra = 0; 208 u32 extra = 0;
155 int ret = 0; 209 int ret = 0;
156 u8 idx;
157 u32 total_len; 210 u32 total_len;
158 211
159 if (!skb) 212 if (!skb)
@@ -166,11 +219,15 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
166 extra = WL1271_TKIP_IV_SPACE; 219 extra = WL1271_TKIP_IV_SPACE;
167 220
168 if (info->control.hw_key) { 221 if (info->control.hw_key) {
169 idx = info->control.hw_key->hw_key_idx; 222 bool is_wep;
223 u8 idx = info->control.hw_key->hw_key_idx;
224 u32 cipher = info->control.hw_key->cipher;
225
226 is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
227 (cipher == WLAN_CIPHER_SUITE_WEP104);
170 228
171 /* FIXME: do we have to do this if we're not using WEP? */ 229 if (unlikely(is_wep && wl->default_key != idx)) {
172 if (unlikely(wl->default_key != idx)) { 230 ret = wl1271_set_default_wep_key(wl, idx);
173 ret = wl1271_cmd_set_default_wep_key(wl, idx);
174 if (ret < 0) 231 if (ret < 0)
175 return ret; 232 return ret;
176 wl->default_key = idx; 233 wl->default_key = idx;
@@ -277,35 +334,13 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
277{ 334{
278 struct sk_buff *skb; 335 struct sk_buff *skb;
279 bool woken_up = false; 336 bool woken_up = false;
280 u32 sta_rates = 0;
281 u32 buf_offset = 0; 337 u32 buf_offset = 0;
282 bool sent_packets = false; 338 bool sent_packets = false;
283 int ret; 339 int ret;
284 340
285 /* check if the rates supported by the AP have changed */
286 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
287 &wl->flags))) {
288 unsigned long flags;
289
290 spin_lock_irqsave(&wl->wl_lock, flags);
291 sta_rates = wl->sta_rate_set;
292 spin_unlock_irqrestore(&wl->wl_lock, flags);
293 }
294
295 if (unlikely(wl->state == WL1271_STATE_OFF)) 341 if (unlikely(wl->state == WL1271_STATE_OFF))
296 goto out; 342 goto out;
297 343
298 /* if rates have changed, re-configure the rate policy */
299 if (unlikely(sta_rates)) {
300 ret = wl1271_ps_elp_wakeup(wl, false);
301 if (ret < 0)
302 goto out;
303 woken_up = true;
304
305 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
306 wl1271_acx_rate_policies(wl);
307 }
308
309 while ((skb = wl1271_skb_dequeue(wl))) { 344 while ((skb = wl1271_skb_dequeue(wl))) {
310 if (!woken_up) { 345 if (!woken_up) {
311 ret = wl1271_ps_elp_wakeup(wl, false); 346 ret = wl1271_ps_elp_wakeup(wl, false);
@@ -521,3 +556,21 @@ void wl1271_tx_flush(struct wl1271 *wl)
521 556
522 wl1271_warning("Unable to flush all TX buffers, timed out."); 557 wl1271_warning("Unable to flush all TX buffers, timed out.");
523} 558}
559
560u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
561{
562 int i;
563 u32 rate = 0;
564
565 if (!wl->basic_rate_set) {
566 WARN_ON(1);
567 wl->basic_rate_set = wl->conf.tx.basic_rate;
568 }
569
570 for (i = 0; !rate; i++) {
571 if ((wl->basic_rate_set >> i) & 0x1)
572 rate = 1 << i;
573 }
574
575 return rate;
576}
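wl1271_tx_min_rate_get above walks the basic rate bitmap until it hits the first set bit, i.e. the lowest enabled rate. A standalone sketch (not part of the patch) of the same computation; the isolate-lowest-bit form noted in the comment is an equivalent shortcut. It assumes a non-empty rate set, as the driver guarantees via the WARN_ON fallback.

#include <assert.h>
#include <stdint.h>

/* Lowest set bit of a non-empty rate bitmap. */
static uint32_t min_rate(uint32_t rate_set)
{
	uint32_t rate = 0;

	for (int i = 0; !rate; i++)
		if ((rate_set >> i) & 0x1)
			rate = 1u << i;

	return rate;		/* equivalently: rate_set & -rate_set */
}

int main(void)
{
	assert(min_rate(0x150) == 0x10);	/* bit 4 is the lowest set bit */
	return 0;
}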
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 903e5dc69b7..05722a560d9 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -29,6 +29,7 @@
29#define TX_HW_BLOCK_SIZE 252 29#define TX_HW_BLOCK_SIZE 252
30 30
31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
32#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
32/* The chipset reference driver states, that the "aid" value 1 33/* The chipset reference driver states, that the "aid" value 1
33 * is for infra-BSS, but is still always used */ 34 * is for infra-BSS, but is still always used */
34#define TX_HW_DEFAULT_AID 1 35#define TX_HW_DEFAULT_AID 1
@@ -77,8 +78,12 @@ struct wl1271_tx_hw_descr {
77 u8 id; 78 u8 id;
78 /* The packet TID value (as User-Priority) */ 79 /* The packet TID value (as User-Priority) */
79 u8 tid; 80 u8 tid;
80 /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ 81 union {
81 u8 aid; 82 /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
83 u8 aid;
84 /* AP - host link ID (HLID) */
85 u8 hlid;
86 } __packed;
82 u8 reserved; 87 u8 reserved;
83} __packed; 88} __packed;
84 89
@@ -146,5 +151,6 @@ void wl1271_tx_reset(struct wl1271 *wl);
146void wl1271_tx_flush(struct wl1271 *wl); 151void wl1271_tx_flush(struct wl1271 *wl);
147u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 152u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
148u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 153u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
154u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
149 155
150#endif 156#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 9050dd9b62d..1d6c94304b1 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
38#define DRIVER_NAME "wl1271" 38#define DRIVER_NAME "wl1271"
39#define DRIVER_PREFIX DRIVER_NAME ": " 39#define DRIVER_PREFIX DRIVER_NAME ": "
40 40
41/*
42 * FW versions that support 11n BA are
43 * marked x.x.x.50-60.x
44 */
45#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
46#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
47
41enum { 48enum {
42 DEBUG_NONE = 0, 49 DEBUG_NONE = 0,
43 DEBUG_IRQ = BIT(0), 50 DEBUG_IRQ = BIT(0),
@@ -57,6 +64,8 @@ enum {
57 DEBUG_SDIO = BIT(14), 64 DEBUG_SDIO = BIT(14),
58 DEBUG_FILTERS = BIT(15), 65 DEBUG_FILTERS = BIT(15),
59 DEBUG_ADHOC = BIT(16), 66 DEBUG_ADHOC = BIT(16),
67 DEBUG_AP = BIT(17),
68 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
60 DEBUG_ALL = ~0, 69 DEBUG_ALL = ~0,
61}; 70};
62 71
@@ -103,16 +112,27 @@ extern u32 wl12xx_debug_level;
103 true); \ 112 true); \
104 } while (0) 113 } while (0)
105 114
106#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ 115#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
107 CFG_BSSID_FILTER_EN | \ 116 CFG_BSSID_FILTER_EN | \
108 CFG_MC_FILTER_EN) 117 CFG_MC_FILTER_EN)
109 118
110#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ 119#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
111 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ 120 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
112 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 121 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
113 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 122 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
114 123
115#define WL1271_FW_NAME "wl1271-fw.bin" 124#define WL1271_DEFAULT_AP_RX_CONFIG 0
125
126#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
127 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
128 CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
129 CFG_RX_ASSOC_EN)
130
131
132
133#define WL1271_FW_NAME "wl1271-fw-2.bin"
134#define WL1271_AP_FW_NAME "wl1271-fw-ap.bin"
135
116#define WL1271_NVS_NAME "wl1271-nvs.bin" 136#define WL1271_NVS_NAME "wl1271-nvs.bin"
117 137
118#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) 138#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
@@ -129,6 +149,14 @@ extern u32 wl12xx_debug_level;
129#define WL1271_DEFAULT_BEACON_INT 100 149#define WL1271_DEFAULT_BEACON_INT 100
130#define WL1271_DEFAULT_DTIM_PERIOD 1 150#define WL1271_DEFAULT_DTIM_PERIOD 1
131 151
152#define WL1271_AP_GLOBAL_HLID 0
153#define WL1271_AP_BROADCAST_HLID 1
154#define WL1271_AP_STA_HLID_START 2
155
156#define WL1271_AP_BSS_INDEX 0
157#define WL1271_AP_DEF_INACTIV_SEC 300
158#define WL1271_AP_DEF_BEACON_EXP 20
159
132#define ACX_TX_DESCRIPTORS 32 160#define ACX_TX_DESCRIPTORS 32
133 161
134#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) 162#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -161,10 +189,13 @@ struct wl1271_partition_set {
161 189
162struct wl1271; 190struct wl1271;
163 191
192#define WL12XX_NUM_FW_VER 5
193
164/* FIXME: I'm not sure about this structure name */ 194/* FIXME: I'm not sure about this structure name */
165struct wl1271_chip { 195struct wl1271_chip {
166 u32 id; 196 u32 id;
167 char fw_ver[21]; 197 char fw_ver_str[ETHTOOL_BUSINFO_LEN];
198 unsigned int fw_ver[WL12XX_NUM_FW_VER];
168}; 199};
169 200
170struct wl1271_stats { 201struct wl1271_stats {
@@ -178,8 +209,13 @@ struct wl1271_stats {
178#define NUM_TX_QUEUES 4 209#define NUM_TX_QUEUES 4
179#define NUM_RX_PKT_DESC 8 210#define NUM_RX_PKT_DESC 8
180 211
181/* FW status registers */ 212#define AP_MAX_STATIONS 5
182struct wl1271_fw_status { 213
214/* Broadcast and Global links + links to stations */
215#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
216
217/* FW status registers common for AP/STA */
218struct wl1271_fw_common_status {
183 __le32 intr; 219 __le32 intr;
184 u8 fw_rx_counter; 220 u8 fw_rx_counter;
185 u8 drv_rx_counter; 221 u8 drv_rx_counter;
@@ -188,9 +224,43 @@ struct wl1271_fw_status {
188 __le32 rx_pkt_descs[NUM_RX_PKT_DESC]; 224 __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
189 __le32 tx_released_blks[NUM_TX_QUEUES]; 225 __le32 tx_released_blks[NUM_TX_QUEUES];
190 __le32 fw_localtime; 226 __le32 fw_localtime;
191 __le32 padding[2];
192} __packed; 227} __packed;
193 228
229/* FW status registers for AP */
230struct wl1271_fw_ap_status {
231 struct wl1271_fw_common_status common;
232
233 /* Next fields valid only in AP FW */
234
235 /*
236 * A bitmap (where each bit represents a single HLID)
237 * to indicate if the station is in PS mode.
238 */
239 __le32 link_ps_bitmap;
240
241 /* Number of freed MBs per HLID */
242 u8 tx_lnk_free_blks[AP_MAX_LINKS];
243 u8 padding_1[1];
244} __packed;
245
246/* FW status registers for STA */
247struct wl1271_fw_sta_status {
248 struct wl1271_fw_common_status common;
249
250 u8 tx_total;
251 u8 reserved1;
252 __le16 reserved2;
253} __packed;
254
255struct wl1271_fw_full_status {
256 union {
257 struct wl1271_fw_common_status common;
258 struct wl1271_fw_sta_status sta;
259 struct wl1271_fw_ap_status ap;
260 };
261} __packed;
262
263
194struct wl1271_rx_mem_pool_addr { 264struct wl1271_rx_mem_pool_addr {
195 u32 addr; 265 u32 addr;
196 u32 addr_extra; 266 u32 addr_extra;
@@ -218,6 +288,37 @@ struct wl1271_if_operations {
218 void (*disable_irq)(struct wl1271 *wl); 288 void (*disable_irq)(struct wl1271 *wl);
219}; 289};
220 290
291#define MAX_NUM_KEYS 14
292#define MAX_KEY_SIZE 32
293
294struct wl1271_ap_key {
295 u8 id;
296 u8 key_type;
297 u8 key_size;
298 u8 key[MAX_KEY_SIZE];
299 u8 hlid;
300 u32 tx_seq_32;
301 u16 tx_seq_16;
302};
303
304enum wl12xx_flags {
305 WL1271_FLAG_STA_ASSOCIATED,
306 WL1271_FLAG_JOINED,
307 WL1271_FLAG_GPIO_POWER,
308 WL1271_FLAG_TX_QUEUE_STOPPED,
309 WL1271_FLAG_IN_ELP,
310 WL1271_FLAG_PSM,
311 WL1271_FLAG_PSM_REQUESTED,
312 WL1271_FLAG_IRQ_PENDING,
313 WL1271_FLAG_IRQ_RUNNING,
314 WL1271_FLAG_IDLE,
315 WL1271_FLAG_IDLE_REQUESTED,
316 WL1271_FLAG_PSPOLL_FAILURE,
317 WL1271_FLAG_STA_STATE_SENT,
318 WL1271_FLAG_FW_TX_BUSY,
319 WL1271_FLAG_AP_STARTED
320};
321
221struct wl1271 { 322struct wl1271 {
222 struct platform_device *plat_dev; 323 struct platform_device *plat_dev;
223 struct ieee80211_hw *hw; 324 struct ieee80211_hw *hw;
@@ -236,21 +337,6 @@ struct wl1271 {
236 enum wl1271_state state; 337 enum wl1271_state state;
237 struct mutex mutex; 338 struct mutex mutex;
238 339
239#define WL1271_FLAG_STA_RATES_CHANGED (0)
240#define WL1271_FLAG_STA_ASSOCIATED (1)
241#define WL1271_FLAG_JOINED (2)
242#define WL1271_FLAG_GPIO_POWER (3)
243#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
244#define WL1271_FLAG_IN_ELP (5)
245#define WL1271_FLAG_PSM (6)
246#define WL1271_FLAG_PSM_REQUESTED (7)
247#define WL1271_FLAG_IRQ_PENDING (8)
248#define WL1271_FLAG_IRQ_RUNNING (9)
249#define WL1271_FLAG_IDLE (10)
250#define WL1271_FLAG_IDLE_REQUESTED (11)
251#define WL1271_FLAG_PSPOLL_FAILURE (12)
252#define WL1271_FLAG_STA_STATE_SENT (13)
253#define WL1271_FLAG_FW_TX_BUSY (14)
254 unsigned long flags; 340 unsigned long flags;
255 341
256 struct wl1271_partition_set part; 342 struct wl1271_partition_set part;
@@ -262,6 +348,7 @@ struct wl1271 {
262 348
263 u8 *fw; 349 u8 *fw;
264 size_t fw_len; 350 size_t fw_len;
351 u8 fw_bss_type;
265 struct wl1271_nvs_file *nvs; 352 struct wl1271_nvs_file *nvs;
266 size_t nvs_len; 353 size_t nvs_len;
267 354
@@ -343,7 +430,6 @@ struct wl1271 {
343 * bits 16-23 - 802.11n MCS index mask 430 * bits 16-23 - 802.11n MCS index mask
344 * support only 1 stream, thus only 8 bits for the MCS rates (0-7). 431 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
345 */ 432 */
346 u32 sta_rate_set;
347 u32 basic_rate_set; 433 u32 basic_rate_set;
348 u32 basic_rate; 434 u32 basic_rate;
349 u32 rate_set; 435 u32 rate_set;
@@ -378,13 +464,12 @@ struct wl1271 {
378 int last_rssi_event; 464 int last_rssi_event;
379 465
380 struct wl1271_stats stats; 466 struct wl1271_stats stats;
381 struct dentry *rootdir;
382 467
383 __le32 buffer_32; 468 __le32 buffer_32;
384 u32 buffer_cmd; 469 u32 buffer_cmd;
385 u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; 470 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
386 471
387 struct wl1271_fw_status *fw_status; 472 struct wl1271_fw_full_status *fw_status;
388 struct wl1271_tx_hw_res_if *tx_res_if; 473 struct wl1271_tx_hw_res_if *tx_res_if;
389 474
390 struct ieee80211_vif *vif; 475 struct ieee80211_vif *vif;
@@ -400,6 +485,23 @@ struct wl1271 {
400 485
401 /* Most recently reported noise in dBm */ 486 /* Most recently reported noise in dBm */
402 s8 noise; 487 s8 noise;
488
489 /* map for HLIDs of associated stations - when operating in AP mode */
490 unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
491
492 /* recorded keys for AP-mode - set here before AP startup */
493 struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
494
495 /* bands supported by this instance of wl12xx */
496 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
497
498 /* RX BA constraint value */
499 bool ba_support;
500 u8 ba_rx_bitmap;
501};
502
503struct wl1271_station {
504 u8 hlid;
403}; 505};
404 506
405int wl1271_plt_start(struct wl1271 *wl); 507int wl1271_plt_start(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index be21032f4dc..67dcf8f28cd 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -138,13 +138,13 @@ struct wl12xx_arp_rsp_template {
138 struct ieee80211_hdr_3addr hdr; 138 struct ieee80211_hdr_3addr hdr;
139 139
140 u8 llc_hdr[sizeof(rfc1042_header)]; 140 u8 llc_hdr[sizeof(rfc1042_header)];
141 u16 llc_type; 141 __be16 llc_type;
142 142
143 struct arphdr arp_hdr; 143 struct arphdr arp_hdr;
144 u8 sender_hw[ETH_ALEN]; 144 u8 sender_hw[ETH_ALEN];
145 u32 sender_ip; 145 __be32 sender_ip;
146 u8 target_hw[ETH_ALEN]; 146 u8 target_hw[ETH_ALEN];
147 u32 target_ip; 147 __be32 target_ip;
148} __packed; 148} __packed;
149 149
150 150
@@ -160,4 +160,9 @@ struct wl12xx_probe_resp_template {
160 struct wl12xx_ie_country country; 160 struct wl12xx_ie_country country;
161} __packed; 161} __packed;
162 162
163struct wl12xx_disconn_template {
164 struct ieee80211_header header;
165 __le16 disconn_reason;
166} __packed;
167
163#endif 168#endif
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 6a9b66051cf..a73a305d3cb 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -108,25 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
108{ 108{
109 int r; 109 int r;
110 int i; 110 int i;
111 zd_addr_t *a16; 111 zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
112 u16 *v16; 112 u16 v16[USB_MAX_IOREAD32_COUNT * 2];
113 unsigned int count16; 113 unsigned int count16;
114 114
115 if (count > USB_MAX_IOREAD32_COUNT) 115 if (count > USB_MAX_IOREAD32_COUNT)
116 return -EINVAL; 116 return -EINVAL;
117 117
118 /* Allocate a single memory block for values and addresses. */ 118 /* Use stack for values and addresses. */
119 count16 = 2*count; 119 count16 = 2 * count;
120 /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */ 120 BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
121 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), 121 BUG_ON(count16 * sizeof(u16) > sizeof(v16));
122 GFP_KERNEL);
123 if (!a16) {
124 dev_dbg_f(zd_chip_dev(chip),
125 "error ENOMEM in allocation of a16\n");
126 r = -ENOMEM;
127 goto out;
128 }
129 v16 = (u16 *)(a16 + count16);
130 122
131 for (i = 0; i < count; i++) { 123 for (i = 0; i < count; i++) {
132 int j = 2*i; 124 int j = 2*i;
@@ -139,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
139 if (r) { 131 if (r) {
140 dev_dbg_f(zd_chip_dev(chip), 132 dev_dbg_f(zd_chip_dev(chip),
141 "error: zd_ioread16v_locked. Error number %d\n", r); 133 "error: zd_ioread16v_locked. Error number %d\n", r);
142 goto out; 134 return r;
143 } 135 }
144 136
145 for (i = 0; i < count; i++) { 137 for (i = 0; i < count; i++) {
@@ -147,18 +139,19 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
147 values[i] = (v16[j] << 16) | v16[j+1]; 139 values[i] = (v16[j] << 16) | v16[j+1];
148 } 140 }
149 141
150out: 142 return 0;
151 kfree((void *)a16);
152 return r;
153} 143}
154 144
155int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, 145static int _zd_iowrite32v_async_locked(struct zd_chip *chip,
156 unsigned int count) 146 const struct zd_ioreq32 *ioreqs,
147 unsigned int count)
157{ 148{
158 int i, j, r; 149 int i, j, r;
159 struct zd_ioreq16 *ioreqs16; 150 struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
160 unsigned int count16; 151 unsigned int count16;
161 152
153 /* Use stack for values and addresses. */
154
162 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 155 ZD_ASSERT(mutex_is_locked(&chip->mutex));
163 156
164 if (count == 0) 157 if (count == 0)
@@ -166,15 +159,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
166 if (count > USB_MAX_IOWRITE32_COUNT) 159 if (count > USB_MAX_IOWRITE32_COUNT)
167 return -EINVAL; 160 return -EINVAL;
168 161
169 /* Allocate a single memory block for values and addresses. */ 162 count16 = 2 * count;
170 count16 = 2*count; 163 BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
171 ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL);
172 if (!ioreqs16) {
173 r = -ENOMEM;
174 dev_dbg_f(zd_chip_dev(chip),
175 "error %d in ioreqs16 allocation\n", r);
176 goto out;
177 }
178 164
179 for (i = 0; i < count; i++) { 165 for (i = 0; i < count; i++) {
180 j = 2*i; 166 j = 2*i;
@@ -185,18 +171,30 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
185 ioreqs16[j+1].addr = ioreqs[i].addr; 171 ioreqs16[j+1].addr = ioreqs[i].addr;
186 } 172 }
187 173
188 r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16); 174 r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16);
189#ifdef DEBUG 175#ifdef DEBUG
190 if (r) { 176 if (r) {
191 dev_dbg_f(zd_chip_dev(chip), 177 dev_dbg_f(zd_chip_dev(chip),
192 "error %d in zd_usb_write16v\n", r); 178 "error %d in zd_usb_write16v\n", r);
193 } 179 }
194#endif /* DEBUG */ 180#endif /* DEBUG */
195out:
196 kfree(ioreqs16);
197 return r; 181 return r;
198} 182}
199 183
184int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
185 unsigned int count)
186{
187 int r;
188
189 zd_usb_iowrite16v_async_start(&chip->usb);
190 r = _zd_iowrite32v_async_locked(chip, ioreqs, count);
191 if (r) {
192 zd_usb_iowrite16v_async_end(&chip->usb, 0);
193 return r;
194 }
195 return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
196}
197
200int zd_iowrite16a_locked(struct zd_chip *chip, 198int zd_iowrite16a_locked(struct zd_chip *chip,
201 const struct zd_ioreq16 *ioreqs, unsigned int count) 199 const struct zd_ioreq16 *ioreqs, unsigned int count)
202{ 200{
@@ -204,6 +202,8 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
204 unsigned int i, j, t, max; 202 unsigned int i, j, t, max;
205 203
206 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 204 ZD_ASSERT(mutex_is_locked(&chip->mutex));
205 zd_usb_iowrite16v_async_start(&chip->usb);
206
207 for (i = 0; i < count; i += j + t) { 207 for (i = 0; i < count; i += j + t) {
208 t = 0; 208 t = 0;
209 max = count-i; 209 max = count-i;
@@ -216,8 +216,9 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
216 } 216 }
217 } 217 }
218 218
219 r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j); 219 r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j);
220 if (r) { 220 if (r) {
221 zd_usb_iowrite16v_async_end(&chip->usb, 0);
221 dev_dbg_f(zd_chip_dev(chip), 222 dev_dbg_f(zd_chip_dev(chip),
222 "error zd_usb_iowrite16v. Error number %d\n", 223 "error zd_usb_iowrite16v. Error number %d\n",
223 r); 224 r);
@@ -225,7 +226,7 @@ int zd_iowrite16a_locked(struct zd_chip *chip,
225 } 226 }
226 } 227 }
227 228
228 return 0; 229 return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
229} 230}
230 231
231/* Writes a variable number of 32 bit registers. The functions will split 232/* Writes a variable number of 32 bit registers. The functions will split
@@ -238,6 +239,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
238 int r; 239 int r;
239 unsigned int i, j, t, max; 240 unsigned int i, j, t, max;
240 241
242 zd_usb_iowrite16v_async_start(&chip->usb);
243
241 for (i = 0; i < count; i += j + t) { 244 for (i = 0; i < count; i += j + t) {
242 t = 0; 245 t = 0;
243 max = count-i; 246 max = count-i;
@@ -250,8 +253,9 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
250 } 253 }
251 } 254 }
252 255
253 r = _zd_iowrite32v_locked(chip, &ioreqs[i], j); 256 r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j);
254 if (r) { 257 if (r) {
258 zd_usb_iowrite16v_async_end(&chip->usb, 0);
255 dev_dbg_f(zd_chip_dev(chip), 259 dev_dbg_f(zd_chip_dev(chip),
256 "error _zd_iowrite32v_locked." 260 "error _zd_iowrite32v_locked."
257 " Error number %d\n", r); 261 " Error number %d\n", r);
@@ -259,7 +263,7 @@ int zd_iowrite32a_locked(struct zd_chip *chip,
259 } 263 }
260 } 264 }
261 265
262 return 0; 266 return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */);
263} 267}
264 268
265int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value) 269int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value)
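The zd1211rw changes above replace the per-call kmalloc with on-stack buffers and batch multi-register writes in an async start/queue/end sequence, aborting with a zero timeout on error and flushing once at the end. A standalone sketch (not part of the patch) of that calling pattern; the helpers below are mock stand-ins for the zd_usb_iowrite16v_async* functions named in the diff, whose exact signatures are not shown here.

#include <stdio.h>

static void async_start(void) { }
static int  async_queue(int chunk) { return chunk == 3 ? -5 /* -EIO */ : 0; }
static int  async_end(int timeout_ms) { printf("flush, wait %d ms\n", timeout_ms); return 0; }

/* Queue every chunk, abort without waiting on error, flush once at the end. */
static int write_batched(int chunks)
{
	async_start();
	for (int i = 0; i < chunks; i++) {
		int r = async_queue(i);
		if (r) {
			async_end(0);	/* abort: don't wait for completion */
			return r;
		}
	}
	return async_end(50);		/* single flush with a 50 ms timeout */
}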
@@ -370,16 +374,12 @@ error:
370 return r; 374 return r;
371} 375}
372 376
373/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and 377static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
374 * CR_MAC_ADDR_P2 must be overwritten 378 const struct zd_ioreq32 *in_reqs,
375 */ 379 const char *type)
376int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
377{ 380{
378 int r; 381 int r;
379 struct zd_ioreq32 reqs[2] = { 382 struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
380 [0] = { .addr = CR_MAC_ADDR_P1 },
381 [1] = { .addr = CR_MAC_ADDR_P2 },
382 };
383 383
384 if (mac_addr) { 384 if (mac_addr) {
385 reqs[0].value = (mac_addr[3] << 24) 385 reqs[0].value = (mac_addr[3] << 24)
@@ -388,9 +388,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
388 | mac_addr[0]; 388 | mac_addr[0];
389 reqs[1].value = (mac_addr[5] << 8) 389 reqs[1].value = (mac_addr[5] << 8)
390 | mac_addr[4]; 390 | mac_addr[4];
391 dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr); 391 dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
392 } else { 392 } else {
393 dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n"); 393 dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
394 } 394 }
395 395
396 mutex_lock(&chip->mutex); 396 mutex_lock(&chip->mutex);
@@ -399,6 +399,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
399 return r; 399 return r;
400} 400}
401 401
402/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
403 * CR_MAC_ADDR_P2 must be overwritten
404 */
405int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
406{
407 static const struct zd_ioreq32 reqs[2] = {
408 [0] = { .addr = CR_MAC_ADDR_P1 },
409 [1] = { .addr = CR_MAC_ADDR_P2 },
410 };
411
412 return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
413}
414
415int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
416{
417 static const struct zd_ioreq32 reqs[2] = {
418 [0] = { .addr = CR_BSSID_P1 },
419 [1] = { .addr = CR_BSSID_P2 },
420 };
421
422 return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
423}
424
402int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain) 425int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
403{ 426{
404 int r; 427 int r;
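zd_write_mac_addr() and the new zd_write_bssid() now share zd_write_mac_addr_common(); the two callers differ only in the target register pair and the debug label, while the six address bytes are packed little-endian into the registers exactly as before (bytes 0-3 into the P1 register, bytes 4-5 into the low half of P2). A stand-alone sketch of that packing (the function name is illustrative):

#include <stdint.h>

/* Pack a 6-byte 802.11 address into two 32-bit register values, byte 0
 * ending up in the least significant byte of the first word, matching
 * the reqs[0]/reqs[1] values built in zd_write_mac_addr_common(). */
static void pack_addr(const uint8_t a[6], uint32_t *p1, uint32_t *p2)
{
	*p1 = ((uint32_t)a[3] << 24) | ((uint32_t)a[2] << 16) |
	      ((uint32_t)a[1] << 8)  |  (uint32_t)a[0];
	*p2 = ((uint32_t)a[5] << 8)  |  (uint32_t)a[4];
}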
@@ -849,11 +872,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
849static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) 872static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
850{ 873{
851 struct zd_ioreq32 reqs[3]; 874 struct zd_ioreq32 reqs[3];
875 u16 b_interval = s->beacon_interval & 0xffff;
852 876
853 if (s->beacon_interval <= 5) 877 if (b_interval <= 5)
854 s->beacon_interval = 5; 878 b_interval = 5;
855 if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval) 879 if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
856 s->pre_tbtt = s->beacon_interval - 1; 880 s->pre_tbtt = b_interval - 1;
857 if (s->atim_wnd_period >= s->pre_tbtt) 881 if (s->atim_wnd_period >= s->pre_tbtt)
858 s->atim_wnd_period = s->pre_tbtt - 1; 882 s->atim_wnd_period = s->pre_tbtt - 1;
859 883
@@ -862,31 +886,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
862 reqs[1].addr = CR_PRE_TBTT; 886 reqs[1].addr = CR_PRE_TBTT;
863 reqs[1].value = s->pre_tbtt; 887 reqs[1].value = s->pre_tbtt;
864 reqs[2].addr = CR_BCN_INTERVAL; 888 reqs[2].addr = CR_BCN_INTERVAL;
865 reqs[2].value = s->beacon_interval; 889 reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
866 890
867 return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); 891 return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
868} 892}
869 893
870 894
871static int set_beacon_interval(struct zd_chip *chip, u32 interval) 895static int set_beacon_interval(struct zd_chip *chip, u16 interval,
896 u8 dtim_period, int type)
872{ 897{
873 int r; 898 int r;
874 struct aw_pt_bi s; 899 struct aw_pt_bi s;
900 u32 b_interval, mode_flag;
875 901
876 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 902 ZD_ASSERT(mutex_is_locked(&chip->mutex));
903
904 if (interval > 0) {
905 switch (type) {
906 case NL80211_IFTYPE_ADHOC:
907 case NL80211_IFTYPE_MESH_POINT:
908 mode_flag = BCN_MODE_IBSS;
909 break;
910 case NL80211_IFTYPE_AP:
911 mode_flag = BCN_MODE_AP;
912 break;
913 default:
914 mode_flag = 0;
915 break;
916 }
917 } else {
918 dtim_period = 0;
919 mode_flag = 0;
920 }
921
922 b_interval = mode_flag | (dtim_period << 16) | interval;
923
924 r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
925 if (r)
926 return r;
877 r = get_aw_pt_bi(chip, &s); 927 r = get_aw_pt_bi(chip, &s);
878 if (r) 928 if (r)
879 return r; 929 return r;
880 s.beacon_interval = interval;
881 return set_aw_pt_bi(chip, &s); 930 return set_aw_pt_bi(chip, &s);
882} 931}
883 932
884int zd_set_beacon_interval(struct zd_chip *chip, u32 interval) 933int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
934 int type)
885{ 935{
886 int r; 936 int r;
887 937
888 mutex_lock(&chip->mutex); 938 mutex_lock(&chip->mutex);
889 r = set_beacon_interval(chip, interval); 939 r = set_beacon_interval(chip, interval, dtim_period, type);
890 mutex_unlock(&chip->mutex); 940 mutex_unlock(&chip->mutex);
891 return r; 941 return r;
892} 942}
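With this change CR_BCN_INTERVAL no longer carries just the interval: set_beacon_interval() packs the beaconing mode flag (BCN_MODE_AP, newly defined in zd_chip.h below, alongside the existing BCN_MODE_IBSS), the DTIM period shifted into bits 16 and up, and the 16-bit interval into one word, while set_aw_pt_bi() clamps only the low 16 bits and preserves the rest. A small sketch of that encoding (plain C; the exact width of the DTIM field is an assumption):

#include <stdint.h>

#define BCN_MODE_AP	0x1000000u	/* values as defined in zd_chip.h */
#define BCN_MODE_IBSS	0x2000000u

/* CR_BCN_INTERVAL layout used by the patch:
 *   bits  0..15  beacon interval (clamped to >= 5 by set_aw_pt_bi())
 *   bits 16..23  DTIM period            (field width assumed)
 *   bits 24..25  beaconing mode flag
 * Beaconing off (interval == 0) clears the DTIM period and mode flag. */
static uint32_t pack_bcn_interval(uint16_t interval, uint8_t dtim_period,
				  uint32_t mode_flag)
{
	if (interval == 0)
		return 0;
	return mode_flag | ((uint32_t)dtim_period << 16) | interval;
}

/* Recover just the interval, as set_aw_pt_bi() does with "& 0xffff". */
static uint16_t bcn_interval_tu(uint32_t reg)
{
	return (uint16_t)(reg & 0xffff);
}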
@@ -905,7 +955,7 @@ static int hw_init(struct zd_chip *chip)
905 if (r) 955 if (r)
906 return r; 956 return r;
907 957
908 return set_beacon_interval(chip, 100); 958 return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
909} 959}
910 960
911static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset) 961static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
@@ -1407,6 +1457,9 @@ void zd_chip_disable_int(struct zd_chip *chip)
1407 mutex_lock(&chip->mutex); 1457 mutex_lock(&chip->mutex);
1408 zd_usb_disable_int(&chip->usb); 1458 zd_usb_disable_int(&chip->usb);
1409 mutex_unlock(&chip->mutex); 1459 mutex_unlock(&chip->mutex);
1460
1461 /* cancel pending interrupt work */
1462 cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
1410} 1463}
1411 1464
1412int zd_chip_enable_rxtx(struct zd_chip *chip) 1465int zd_chip_enable_rxtx(struct zd_chip *chip)
@@ -1416,6 +1469,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
1416 mutex_lock(&chip->mutex); 1469 mutex_lock(&chip->mutex);
1417 zd_usb_enable_tx(&chip->usb); 1470 zd_usb_enable_tx(&chip->usb);
1418 r = zd_usb_enable_rx(&chip->usb); 1471 r = zd_usb_enable_rx(&chip->usb);
1472 zd_tx_watchdog_enable(&chip->usb);
1419 mutex_unlock(&chip->mutex); 1473 mutex_unlock(&chip->mutex);
1420 return r; 1474 return r;
1421} 1475}
@@ -1423,6 +1477,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
1423void zd_chip_disable_rxtx(struct zd_chip *chip) 1477void zd_chip_disable_rxtx(struct zd_chip *chip)
1424{ 1478{
1425 mutex_lock(&chip->mutex); 1479 mutex_lock(&chip->mutex);
1480 zd_tx_watchdog_disable(&chip->usb);
1426 zd_usb_disable_rx(&chip->usb); 1481 zd_usb_disable_rx(&chip->usb);
1427 zd_usb_disable_tx(&chip->usb); 1482 zd_usb_disable_tx(&chip->usb);
1428 mutex_unlock(&chip->mutex); 1483 mutex_unlock(&chip->mutex);
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index f8bbf7d302a..14e4402a611 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -546,6 +546,7 @@ enum {
546#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \ 546#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
547 RX_FILTER_CFEND | RX_FILTER_CFACK) 547 RX_FILTER_CFEND | RX_FILTER_CFACK)
548 548
549#define BCN_MODE_AP 0x1000000
549#define BCN_MODE_IBSS 0x2000000 550#define BCN_MODE_IBSS 0x2000000
550 551
551/* Monitor mode sets filter to 0xfffff */ 552/* Monitor mode sets filter to 0xfffff */
@@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
881u8 zd_chip_get_channel(struct zd_chip *chip); 882u8 zd_chip_get_channel(struct zd_chip *chip);
882int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain); 883int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
883int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr); 884int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
885int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
884int zd_chip_switch_radio_on(struct zd_chip *chip); 886int zd_chip_switch_radio_on(struct zd_chip *chip);
885int zd_chip_switch_radio_off(struct zd_chip *chip); 887int zd_chip_switch_radio_off(struct zd_chip *chip);
886int zd_chip_enable_int(struct zd_chip *chip); 888int zd_chip_enable_int(struct zd_chip *chip);
@@ -920,7 +922,8 @@ enum led_status {
920 922
921int zd_chip_control_leds(struct zd_chip *chip, enum led_status status); 923int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
922 924
923int zd_set_beacon_interval(struct zd_chip *chip, u32 interval); 925int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
926 int type);
924 927
925static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval) 928static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
926{ 929{
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 6ac597ffd3b..5463ca9ebc0 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -45,7 +45,7 @@ typedef u16 __nocast zd_addr_t;
45#ifdef DEBUG 45#ifdef DEBUG
46# define ZD_ASSERT(x) \ 46# define ZD_ASSERT(x) \
47do { \ 47do { \
48 if (!(x)) { \ 48 if (unlikely(!(x))) { \
49 pr_debug("%s:%d ASSERT %s VIOLATED!\n", \ 49 pr_debug("%s:%d ASSERT %s VIOLATED!\n", \
50 __FILE__, __LINE__, __stringify(x)); \ 50 __FILE__, __LINE__, __stringify(x)); \
51 dump_stack(); \ 51 dump_stack(); \
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6107304cb94..74a269ebbeb 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = {
138static void housekeeping_init(struct zd_mac *mac); 138static void housekeeping_init(struct zd_mac *mac);
139static void housekeeping_enable(struct zd_mac *mac); 139static void housekeeping_enable(struct zd_mac *mac);
140static void housekeeping_disable(struct zd_mac *mac); 140static void housekeeping_disable(struct zd_mac *mac);
141static void beacon_init(struct zd_mac *mac);
142static void beacon_enable(struct zd_mac *mac);
143static void beacon_disable(struct zd_mac *mac);
144static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
145static int zd_mac_config_beacon(struct ieee80211_hw *hw,
146 struct sk_buff *beacon);
141 147
142static int zd_reg2alpha2(u8 regdomain, char *alpha2) 148static int zd_reg2alpha2(u8 regdomain, char *alpha2)
143{ 149{
@@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac)
231 return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); 237 return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
232} 238}
233 239
240static int set_mac_and_bssid(struct zd_mac *mac)
241{
242 int r;
243
244 if (!mac->vif)
245 return -1;
246
247 r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
248 if (r)
249 return r;
250
251 /* Vendor driver after setting MAC either sets BSSID for AP or
252 * filter for other modes.
253 */
254 if (mac->type != NL80211_IFTYPE_AP)
255 return set_rx_filter(mac);
256 else
257 return zd_write_bssid(&mac->chip, mac->vif->addr);
258}
259
234static int set_mc_hash(struct zd_mac *mac) 260static int set_mc_hash(struct zd_mac *mac)
235{ 261{
236 struct zd_mc_hash hash; 262 struct zd_mc_hash hash;
@@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac)
238 return zd_chip_set_multicast_hash(&mac->chip, &hash); 264 return zd_chip_set_multicast_hash(&mac->chip, &hash);
239} 265}
240 266
241static int zd_op_start(struct ieee80211_hw *hw) 267int zd_op_start(struct ieee80211_hw *hw)
242{ 268{
243 struct zd_mac *mac = zd_hw_mac(hw); 269 struct zd_mac *mac = zd_hw_mac(hw);
244 struct zd_chip *chip = &mac->chip; 270 struct zd_chip *chip = &mac->chip;
@@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw)
275 goto disable_rxtx; 301 goto disable_rxtx;
276 302
277 housekeeping_enable(mac); 303 housekeeping_enable(mac);
304 beacon_enable(mac);
305 set_bit(ZD_DEVICE_RUNNING, &mac->flags);
278 return 0; 306 return 0;
279disable_rxtx: 307disable_rxtx:
280 zd_chip_disable_rxtx(chip); 308 zd_chip_disable_rxtx(chip);
@@ -286,19 +314,22 @@ out:
286 return r; 314 return r;
287} 315}
288 316
289static void zd_op_stop(struct ieee80211_hw *hw) 317void zd_op_stop(struct ieee80211_hw *hw)
290{ 318{
291 struct zd_mac *mac = zd_hw_mac(hw); 319 struct zd_mac *mac = zd_hw_mac(hw);
292 struct zd_chip *chip = &mac->chip; 320 struct zd_chip *chip = &mac->chip;
293 struct sk_buff *skb; 321 struct sk_buff *skb;
294 struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue; 322 struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;
295 323
324 clear_bit(ZD_DEVICE_RUNNING, &mac->flags);
325
296 /* The order here deliberately is a little different from the open() 326 /* The order here deliberately is a little different from the open()
297 * method, since we need to make sure there is no opportunity for RX 327 * method, since we need to make sure there is no opportunity for RX
298 * frames to be processed by mac80211 after we have stopped it. 328 * frames to be processed by mac80211 after we have stopped it.
299 */ 329 */
300 330
301 zd_chip_disable_rxtx(chip); 331 zd_chip_disable_rxtx(chip);
332 beacon_disable(mac);
302 housekeeping_disable(mac); 333 housekeeping_disable(mac);
303 flush_workqueue(zd_workqueue); 334 flush_workqueue(zd_workqueue);
304 335
@@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw)
311 dev_kfree_skb_any(skb); 342 dev_kfree_skb_any(skb);
312} 343}
313 344
345int zd_restore_settings(struct zd_mac *mac)
346{
347 struct sk_buff *beacon;
348 struct zd_mc_hash multicast_hash;
349 unsigned int short_preamble;
350 int r, beacon_interval, beacon_period;
351 u8 channel;
352
353 dev_dbg_f(zd_mac_dev(mac), "\n");
354
355 spin_lock_irq(&mac->lock);
356 multicast_hash = mac->multicast_hash;
357 short_preamble = mac->short_preamble;
358 beacon_interval = mac->beacon.interval;
359 beacon_period = mac->beacon.period;
360 channel = mac->channel;
361 spin_unlock_irq(&mac->lock);
362
363 r = set_mac_and_bssid(mac);
364 if (r < 0) {
365 dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
366 return r;
367 }
368
369 r = zd_chip_set_channel(&mac->chip, channel);
370 if (r < 0) {
371 dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
372 r);
373 return r;
374 }
375
376 set_rts_cts(mac, short_preamble);
377
378 r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
379 if (r < 0) {
380 dev_dbg_f(zd_mac_dev(mac),
381 "zd_chip_set_multicast_hash failed, %d\n", r);
382 return r;
383 }
384
385 if (mac->type == NL80211_IFTYPE_MESH_POINT ||
386 mac->type == NL80211_IFTYPE_ADHOC ||
387 mac->type == NL80211_IFTYPE_AP) {
388 if (mac->vif != NULL) {
389 beacon = ieee80211_beacon_get(mac->hw, mac->vif);
390 if (beacon) {
391 zd_mac_config_beacon(mac->hw, beacon);
392 kfree_skb(beacon);
393 }
394 }
395
396 zd_set_beacon_interval(&mac->chip, beacon_interval,
397 beacon_period, mac->type);
398
399 spin_lock_irq(&mac->lock);
400 mac->beacon.last_update = jiffies;
401 spin_unlock_irq(&mac->lock);
402 }
403
404 return 0;
405}
406
314/** 407/**
315 * zd_mac_tx_status - reports tx status of a packet if required 408 * zd_mac_tx_status - reports tx status of a packet if required
316 * @hw - a &struct ieee80211_hw pointer 409 * @hw - a &struct ieee80211_hw pointer
@@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
574static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon) 667static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
575{ 668{
576 struct zd_mac *mac = zd_hw_mac(hw); 669 struct zd_mac *mac = zd_hw_mac(hw);
577 int r; 670 int r, ret, num_cmds, req_pos = 0;
578 u32 tmp, j = 0; 671 u32 tmp, j = 0;
579 /* 4 more bytes for tail CRC */ 672 /* 4 more bytes for tail CRC */
580 u32 full_len = beacon->len + 4; 673 u32 full_len = beacon->len + 4;
674 unsigned long end_jiffies, message_jiffies;
675 struct zd_ioreq32 *ioreqs;
676
677 /* Alloc memory for full beacon write at once. */
678 num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
679 ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
680 if (!ioreqs)
681 return -ENOMEM;
581 682
582 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0); 683 mutex_lock(&mac->chip.mutex);
684
685 r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
583 if (r < 0) 686 if (r < 0)
584 return r; 687 goto out;
585 r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); 688 r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
586 if (r < 0) 689 if (r < 0)
587 return r; 690 goto release_sema;
588 691
692 end_jiffies = jiffies + HZ / 2; /*~500ms*/
693 message_jiffies = jiffies + HZ / 10; /*~100ms*/
589 while (tmp & 0x2) { 694 while (tmp & 0x2) {
590 r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); 695 r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
591 if (r < 0) 696 if (r < 0)
592 return r; 697 goto release_sema;
593 if ((++j % 100) == 0) { 698 if (time_is_before_eq_jiffies(message_jiffies)) {
594 printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n"); 699 message_jiffies = jiffies + HZ / 10;
595 if (j >= 500) { 700 dev_err(zd_mac_dev(mac),
596 printk(KERN_ERR "Giving up beacon config.\n"); 701 "CR_BCN_FIFO_SEMAPHORE not ready\n");
597 return -ETIMEDOUT; 702 if (time_is_before_eq_jiffies(end_jiffies)) {
703 dev_err(zd_mac_dev(mac),
704 "Giving up beacon config.\n");
705 r = -ETIMEDOUT;
706 goto reset_device;
598 } 707 }
599 } 708 }
600 msleep(1); 709 msleep(20);
601 } 710 }
602 711
603 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1); 712 ioreqs[req_pos].addr = CR_BCN_FIFO;
604 if (r < 0) 713 ioreqs[req_pos].value = full_len - 1;
605 return r; 714 req_pos++;
606 if (zd_chip_is_zd1211b(&mac->chip)) { 715 if (zd_chip_is_zd1211b(&mac->chip)) {
607 r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1); 716 ioreqs[req_pos].addr = CR_BCN_LENGTH;
608 if (r < 0) 717 ioreqs[req_pos].value = full_len - 1;
609 return r; 718 req_pos++;
610 } 719 }
611 720
612 for (j = 0 ; j < beacon->len; j++) { 721 for (j = 0 ; j < beacon->len; j++) {
613 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 722 ioreqs[req_pos].addr = CR_BCN_FIFO;
614 *((u8 *)(beacon->data + j))); 723 ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
615 if (r < 0) 724 req_pos++;
616 return r;
617 } 725 }
618 726
619 for (j = 0; j < 4; j++) { 727 for (j = 0; j < 4; j++) {
620 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0); 728 ioreqs[req_pos].addr = CR_BCN_FIFO;
621 if (r < 0) 729 ioreqs[req_pos].value = 0x0;
622 return r; 730 req_pos++;
623 } 731 }
624 732
625 r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1); 733 BUG_ON(req_pos != num_cmds);
626 if (r < 0) 734
627 return r; 735 r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);
736
737release_sema:
738 /*
739 * Try very hard to release device beacon semaphore, as otherwise
740 * device/driver can be left in unusable state.
741 */
742 end_jiffies = jiffies + HZ / 2; /*~500ms*/
743 ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
744 while (ret < 0) {
745 if (time_is_before_eq_jiffies(end_jiffies)) {
746 ret = -ETIMEDOUT;
747 break;
748 }
749
750 msleep(20);
751 ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
752 }
753
754 if (ret < 0)
755 dev_err(zd_mac_dev(mac), "Could not release "
756 "CR_BCN_FIFO_SEMAPHORE!\n");
757 if (r < 0 || ret < 0) {
758 if (r >= 0)
759 r = ret;
760 goto out;
761 }
628 762
629 /* 802.11b/g 2.4G CCK 1Mb 763 /* 802.11b/g 2.4G CCK 1Mb
630 * 802.11a, not yet implemented, uses different values (see GPL vendor 764 * 802.11a, not yet implemented, uses different values (see GPL vendor
631 * driver) 765 * driver)
632 */ 766 */
633 return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 | 767 r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
634 (full_len << 19)); 768 CR_BCN_PLCP_CFG);
769out:
770 mutex_unlock(&mac->chip.mutex);
771 kfree(ioreqs);
772 return r;
773
774reset_device:
775 mutex_unlock(&mac->chip.mutex);
776 kfree(ioreqs);
777
778 /* semaphore stuck, reset device to avoid fw freeze later */
779 dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
780 "reseting device...");
781 usb_queue_reset_device(mac->chip.usb.intf);
782
783 return r;
635} 784}
636 785
637static int fill_ctrlset(struct zd_mac *mac, 786static int fill_ctrlset(struct zd_mac *mac,
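zd_mac_config_beacon() now takes the beacon-FIFO semaphore with a real deadline instead of a fixed retry count: it polls the register, prints a throttled warning roughly every 100 ms, gives up (and queues a device reset) after roughly 500 ms, and sleeps 20 ms between polls; the upload is then issued as a single zd_iowrite32a_locked() batch and the semaphore is released with a similar retry loop. A user-space sketch of the poll-with-deadline part (not the driver's code; read_semaphore() is a placeholder for the CR_BCN_FIFO_SEMAPHORE read):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Placeholder for reading CR_BCN_FIFO_SEMAPHORE; bit 1 set means busy. */
static unsigned int read_semaphore(void)
{
	return 0;
}

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll until the busy bit clears: warn every ~100 ms, give up after
 * ~500 ms (the caller would then reset the device), poll every 20 ms. */
static int wait_for_semaphore(void)
{
	long end = now_ms() + 500;
	long next_msg = now_ms() + 100;

	while (read_semaphore() & 0x2) {
		if (now_ms() >= next_msg) {
			next_msg = now_ms() + 100;
			fprintf(stderr, "beacon semaphore not ready\n");
			if (now_ms() >= end)
				return -1;
		}
		usleep(20 * 1000);	/* 20 ms between polls */
	}
	return 0;
}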
@@ -779,6 +928,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
779 928
780 mac->ack_pending = 1; 929 mac->ack_pending = 1;
781 mac->ack_signal = stats->signal; 930 mac->ack_signal = stats->signal;
931
932 /* Prevent pending tx-packet on AP-mode */
933 if (mac->type == NL80211_IFTYPE_AP) {
934 skb = __skb_dequeue(q);
935 zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
936 mac->ack_pending = 0;
937 }
782 } 938 }
783 939
784 spin_unlock_irqrestore(&q->lock, flags); 940 spin_unlock_irqrestore(&q->lock, flags);
@@ -882,13 +1038,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
882 case NL80211_IFTYPE_MESH_POINT: 1038 case NL80211_IFTYPE_MESH_POINT:
883 case NL80211_IFTYPE_STATION: 1039 case NL80211_IFTYPE_STATION:
884 case NL80211_IFTYPE_ADHOC: 1040 case NL80211_IFTYPE_ADHOC:
1041 case NL80211_IFTYPE_AP:
885 mac->type = vif->type; 1042 mac->type = vif->type;
886 break; 1043 break;
887 default: 1044 default:
888 return -EOPNOTSUPP; 1045 return -EOPNOTSUPP;
889 } 1046 }
890 1047
891 return zd_write_mac_addr(&mac->chip, vif->addr); 1048 mac->vif = vif;
1049
1050 return set_mac_and_bssid(mac);
892} 1051}
893 1052
894static void zd_op_remove_interface(struct ieee80211_hw *hw, 1053static void zd_op_remove_interface(struct ieee80211_hw *hw,
@@ -896,7 +1055,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
896{ 1055{
897 struct zd_mac *mac = zd_hw_mac(hw); 1056 struct zd_mac *mac = zd_hw_mac(hw);
898 mac->type = NL80211_IFTYPE_UNSPECIFIED; 1057 mac->type = NL80211_IFTYPE_UNSPECIFIED;
899 zd_set_beacon_interval(&mac->chip, 0); 1058 mac->vif = NULL;
1059 zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
900 zd_write_mac_addr(&mac->chip, NULL); 1060 zd_write_mac_addr(&mac->chip, NULL);
901} 1061}
902 1062
@@ -905,49 +1065,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
905 struct zd_mac *mac = zd_hw_mac(hw); 1065 struct zd_mac *mac = zd_hw_mac(hw);
906 struct ieee80211_conf *conf = &hw->conf; 1066 struct ieee80211_conf *conf = &hw->conf;
907 1067
1068 spin_lock_irq(&mac->lock);
1069 mac->channel = conf->channel->hw_value;
1070 spin_unlock_irq(&mac->lock);
1071
908 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value); 1072 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
909} 1073}
910 1074
911static void zd_process_intr(struct work_struct *work) 1075static void zd_beacon_done(struct zd_mac *mac)
912{ 1076{
913 u16 int_status; 1077 struct sk_buff *skb, *beacon;
914 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
915 1078
916 int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4)); 1079 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
917 if (int_status & INT_CFG_NEXT_BCN) 1080 return;
918 dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n"); 1081 if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
919 else 1082 return;
920 dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
921
922 zd_chip_enable_hwint(&mac->chip);
923}
924 1083
1084 /*
1085 * Send out buffered broad- and multicast frames.
1086 */
1087 while (!ieee80211_queue_stopped(mac->hw, 0)) {
1088 skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
1089 if (!skb)
1090 break;
1091 zd_op_tx(mac->hw, skb);
1092 }
925 1093
926static void set_multicast_hash_handler(struct work_struct *work) 1094 /*
927{ 1095 * Fetch next beacon so that tim_count is updated.
928 struct zd_mac *mac = 1096 */
929 container_of(work, struct zd_mac, set_multicast_hash_work); 1097 beacon = ieee80211_beacon_get(mac->hw, mac->vif);
930 struct zd_mc_hash hash; 1098 if (beacon) {
1099 zd_mac_config_beacon(mac->hw, beacon);
1100 kfree_skb(beacon);
1101 }
931 1102
932 spin_lock_irq(&mac->lock); 1103 spin_lock_irq(&mac->lock);
933 hash = mac->multicast_hash; 1104 mac->beacon.last_update = jiffies;
934 spin_unlock_irq(&mac->lock); 1105 spin_unlock_irq(&mac->lock);
935
936 zd_chip_set_multicast_hash(&mac->chip, &hash);
937} 1106}
938 1107
939static void set_rx_filter_handler(struct work_struct *work) 1108static void zd_process_intr(struct work_struct *work)
940{ 1109{
941 struct zd_mac *mac = 1110 u16 int_status;
942 container_of(work, struct zd_mac, set_rx_filter_work); 1111 unsigned long flags;
943 int r; 1112 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
944 1113
945 dev_dbg_f(zd_mac_dev(mac), "\n"); 1114 spin_lock_irqsave(&mac->lock, flags);
946 r = set_rx_filter(mac); 1115 int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
947 if (r) 1116 spin_unlock_irqrestore(&mac->lock, flags);
948 dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r); 1117
1118 if (int_status & INT_CFG_NEXT_BCN) {
1119 /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
1120 zd_beacon_done(mac);
1121 } else {
1122 dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
1123 }
1124
1125 zd_chip_enable_hwint(&mac->chip);
949} 1126}
950 1127
1128
951static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, 1129static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
952 struct netdev_hw_addr_list *mc_list) 1130 struct netdev_hw_addr_list *mc_list)
953{ 1131{
@@ -979,6 +1157,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
979 }; 1157 };
980 struct zd_mac *mac = zd_hw_mac(hw); 1158 struct zd_mac *mac = zd_hw_mac(hw);
981 unsigned long flags; 1159 unsigned long flags;
1160 int r;
982 1161
983 /* Only deal with supported flags */ 1162 /* Only deal with supported flags */
984 changed_flags &= SUPPORTED_FIF_FLAGS; 1163 changed_flags &= SUPPORTED_FIF_FLAGS;
@@ -1000,11 +1179,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
1000 mac->multicast_hash = hash; 1179 mac->multicast_hash = hash;
1001 spin_unlock_irqrestore(&mac->lock, flags); 1180 spin_unlock_irqrestore(&mac->lock, flags);
1002 1181
1003 /* XXX: these can be called here now, can sleep now! */ 1182 zd_chip_set_multicast_hash(&mac->chip, &hash);
1004 queue_work(zd_workqueue, &mac->set_multicast_hash_work);
1005 1183
1006 if (changed_flags & FIF_CONTROL) 1184 if (changed_flags & FIF_CONTROL) {
1007 queue_work(zd_workqueue, &mac->set_rx_filter_work); 1185 r = set_rx_filter(mac);
1186 if (r)
1187 dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
1188 }
1008 1189
1009 /* no handling required for FIF_OTHER_BSS as we don't currently 1190 /* no handling required for FIF_OTHER_BSS as we don't currently
1010 * do BSSID filtering */ 1191 * do BSSID filtering */
@@ -1016,20 +1197,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
1016 * time. */ 1197 * time. */
1017} 1198}
1018 1199
1019static void set_rts_cts_work(struct work_struct *work) 1200static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
1020{ 1201{
1021 struct zd_mac *mac =
1022 container_of(work, struct zd_mac, set_rts_cts_work);
1023 unsigned long flags;
1024 unsigned int short_preamble;
1025
1026 mutex_lock(&mac->chip.mutex); 1202 mutex_lock(&mac->chip.mutex);
1027
1028 spin_lock_irqsave(&mac->lock, flags);
1029 mac->updating_rts_rate = 0;
1030 short_preamble = mac->short_preamble;
1031 spin_unlock_irqrestore(&mac->lock, flags);
1032
1033 zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble); 1203 zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
1034 mutex_unlock(&mac->chip.mutex); 1204 mutex_unlock(&mac->chip.mutex);
1035} 1205}
@@ -1040,33 +1210,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
1040 u32 changes) 1210 u32 changes)
1041{ 1211{
1042 struct zd_mac *mac = zd_hw_mac(hw); 1212 struct zd_mac *mac = zd_hw_mac(hw);
1043 unsigned long flags;
1044 int associated; 1213 int associated;
1045 1214
1046 dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes); 1215 dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
1047 1216
1048 if (mac->type == NL80211_IFTYPE_MESH_POINT || 1217 if (mac->type == NL80211_IFTYPE_MESH_POINT ||
1049 mac->type == NL80211_IFTYPE_ADHOC) { 1218 mac->type == NL80211_IFTYPE_ADHOC ||
1219 mac->type == NL80211_IFTYPE_AP) {
1050 associated = true; 1220 associated = true;
1051 if (changes & BSS_CHANGED_BEACON) { 1221 if (changes & BSS_CHANGED_BEACON) {
1052 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 1222 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1053 1223
1054 if (beacon) { 1224 if (beacon) {
1225 zd_chip_disable_hwint(&mac->chip);
1055 zd_mac_config_beacon(hw, beacon); 1226 zd_mac_config_beacon(hw, beacon);
1227 zd_chip_enable_hwint(&mac->chip);
1056 kfree_skb(beacon); 1228 kfree_skb(beacon);
1057 } 1229 }
1058 } 1230 }
1059 1231
1060 if (changes & BSS_CHANGED_BEACON_ENABLED) { 1232 if (changes & BSS_CHANGED_BEACON_ENABLED) {
1061 u32 interval; 1233 u16 interval = 0;
1234 u8 period = 0;
1062 1235
1063 if (bss_conf->enable_beacon) 1236 if (bss_conf->enable_beacon) {
1064 interval = BCN_MODE_IBSS | 1237 period = bss_conf->dtim_period;
1065 bss_conf->beacon_int; 1238 interval = bss_conf->beacon_int;
1066 else 1239 }
1067 interval = 0;
1068 1240
1069 zd_set_beacon_interval(&mac->chip, interval); 1241 spin_lock_irq(&mac->lock);
1242 mac->beacon.period = period;
1243 mac->beacon.interval = interval;
1244 mac->beacon.last_update = jiffies;
1245 spin_unlock_irq(&mac->lock);
1246
1247 zd_set_beacon_interval(&mac->chip, interval, period,
1248 mac->type);
1070 } 1249 }
1071 } else 1250 } else
1072 associated = is_valid_ether_addr(bss_conf->bssid); 1251 associated = is_valid_ether_addr(bss_conf->bssid);
@@ -1078,15 +1257,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
1078 /* TODO: do hardware bssid filtering */ 1257 /* TODO: do hardware bssid filtering */
1079 1258
1080 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 1259 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
1081 spin_lock_irqsave(&mac->lock, flags); 1260 spin_lock_irq(&mac->lock);
1082 mac->short_preamble = bss_conf->use_short_preamble; 1261 mac->short_preamble = bss_conf->use_short_preamble;
1083 if (!mac->updating_rts_rate) { 1262 spin_unlock_irq(&mac->lock);
1084 mac->updating_rts_rate = 1; 1263
1085 /* FIXME: should disable TX here, until work has 1264 set_rts_cts(mac, bss_conf->use_short_preamble);
1086 * completed and RTS_CTS reg is updated */
1087 queue_work(zd_workqueue, &mac->set_rts_cts_work);
1088 }
1089 spin_unlock_irqrestore(&mac->lock, flags);
1090 } 1265 }
1091} 1266}
1092 1267
@@ -1138,12 +1313,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
1138 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; 1313 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
1139 1314
1140 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 1315 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1141 IEEE80211_HW_SIGNAL_UNSPEC; 1316 IEEE80211_HW_SIGNAL_UNSPEC |
1317 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
1142 1318
1143 hw->wiphy->interface_modes = 1319 hw->wiphy->interface_modes =
1144 BIT(NL80211_IFTYPE_MESH_POINT) | 1320 BIT(NL80211_IFTYPE_MESH_POINT) |
1145 BIT(NL80211_IFTYPE_STATION) | 1321 BIT(NL80211_IFTYPE_STATION) |
1146 BIT(NL80211_IFTYPE_ADHOC); 1322 BIT(NL80211_IFTYPE_ADHOC) |
1323 BIT(NL80211_IFTYPE_AP);
1147 1324
1148 hw->max_signal = 100; 1325 hw->max_signal = 100;
1149 hw->queues = 1; 1326 hw->queues = 1;
@@ -1160,15 +1337,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
1160 1337
1161 zd_chip_init(&mac->chip, hw, intf); 1338 zd_chip_init(&mac->chip, hw, intf);
1162 housekeeping_init(mac); 1339 housekeeping_init(mac);
1163 INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler); 1340 beacon_init(mac);
1164 INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
1165 INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
1166 INIT_WORK(&mac->process_intr, zd_process_intr); 1341 INIT_WORK(&mac->process_intr, zd_process_intr);
1167 1342
1168 SET_IEEE80211_DEV(hw, &intf->dev); 1343 SET_IEEE80211_DEV(hw, &intf->dev);
1169 return hw; 1344 return hw;
1170} 1345}
1171 1346
1347#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)
1348
1349static void beacon_watchdog_handler(struct work_struct *work)
1350{
1351 struct zd_mac *mac =
1352 container_of(work, struct zd_mac, beacon.watchdog_work.work);
1353 struct sk_buff *beacon;
1354 unsigned long timeout;
1355 int interval, period;
1356
1357 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
1358 goto rearm;
1359 if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
1360 goto rearm;
1361
1362 spin_lock_irq(&mac->lock);
1363 interval = mac->beacon.interval;
1364 period = mac->beacon.period;
1365 timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
1366 spin_unlock_irq(&mac->lock);
1367
1368 if (interval > 0 && time_is_before_jiffies(timeout)) {
1369 dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
1370 "restarting. "
1371 "(interval: %d, dtim: %d)\n",
1372 interval, period);
1373
1374 zd_chip_disable_hwint(&mac->chip);
1375
1376 beacon = ieee80211_beacon_get(mac->hw, mac->vif);
1377 if (beacon) {
1378 zd_mac_config_beacon(mac->hw, beacon);
1379 kfree_skb(beacon);
1380 }
1381
1382 zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
1383
1384 zd_chip_enable_hwint(&mac->chip);
1385
1386 spin_lock_irq(&mac->lock);
1387 mac->beacon.last_update = jiffies;
1388 spin_unlock_irq(&mac->lock);
1389 }
1390
1391rearm:
1392 queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
1393 BEACON_WATCHDOG_DELAY);
1394}
1395
1396static void beacon_init(struct zd_mac *mac)
1397{
1398 INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
1399}
1400
1401static void beacon_enable(struct zd_mac *mac)
1402{
1403 dev_dbg_f(zd_mac_dev(mac), "\n");
1404
1405 mac->beacon.last_update = jiffies;
1406 queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
1407 BEACON_WATCHDOG_DELAY);
1408}
1409
1410static void beacon_disable(struct zd_mac *mac)
1411{
1412 dev_dbg_f(zd_mac_dev(mac), "\n");
1413 cancel_delayed_work_sync(&mac->beacon.watchdog_work);
1414}
1415
1172#define LINK_LED_WORK_DELAY HZ 1416#define LINK_LED_WORK_DELAY HZ
1173 1417
1174static void link_led_handler(struct work_struct *work) 1418static void link_led_handler(struct work_struct *work)
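The beacon watchdog added above is a delayed work item that re-arms itself about once per second; when the device is running in AP mode and mac->beacon.last_update has not moved for a full beacon interval plus one second of slack, it re-uploads the beacon and re-programs the interval with hardware interrupts disabled. A minimal stand-alone form of the stall test (milliseconds instead of jiffies; the driver passes the interval to msecs_to_jiffies() in the same way):

#include <stdbool.h>
#include <stdint.h>

/* Beaconing counts as stalled when no beacon-done update has been seen
 * for one beacon interval plus one second of slack.  All times are in
 * milliseconds here; the driver works in jiffies. */
static bool beacon_stalled(int64_t now_ms, int64_t last_update_ms,
			   int interval_ms)
{
	if (interval_ms <= 0)		/* beaconing disabled */
		return false;
	return now_ms > last_update_ms + interval_ms + 1000;
}

If this returns true the watchdog reconfigures the beacon and stamps last_update again; otherwise it simply re-queues itself with BEACON_WATCHDOG_DELAY.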
@@ -1179,6 +1423,9 @@ static void link_led_handler(struct work_struct *work)
1179 int is_associated; 1423 int is_associated;
1180 int r; 1424 int r;
1181 1425
1426 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
1427 goto requeue;
1428
1182 spin_lock_irq(&mac->lock); 1429 spin_lock_irq(&mac->lock);
1183 is_associated = mac->associated; 1430 is_associated = mac->associated;
1184 spin_unlock_irq(&mac->lock); 1431 spin_unlock_irq(&mac->lock);
@@ -1188,6 +1435,7 @@ static void link_led_handler(struct work_struct *work)
1188 if (r) 1435 if (r)
1189 dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r); 1436 dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);
1190 1437
1438requeue:
1191 queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work, 1439 queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
1192 LINK_LED_WORK_DELAY); 1440 LINK_LED_WORK_DELAY);
1193} 1441}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index a6d86b996c7..f8c93c3fe75 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -163,6 +163,17 @@ struct housekeeping {
163 struct delayed_work link_led_work; 163 struct delayed_work link_led_work;
164}; 164};
165 165
166struct beacon {
167 struct delayed_work watchdog_work;
168 unsigned long last_update;
169 u16 interval;
170 u8 period;
171};
172
173enum zd_device_flags {
174 ZD_DEVICE_RUNNING,
175};
176
166#define ZD_MAC_STATS_BUFFER_SIZE 16 177#define ZD_MAC_STATS_BUFFER_SIZE 16
167 178
168#define ZD_MAC_MAX_ACK_WAITERS 50 179#define ZD_MAC_MAX_ACK_WAITERS 50
@@ -172,17 +183,19 @@ struct zd_mac {
172 spinlock_t lock; 183 spinlock_t lock;
173 spinlock_t intr_lock; 184 spinlock_t intr_lock;
174 struct ieee80211_hw *hw; 185 struct ieee80211_hw *hw;
186 struct ieee80211_vif *vif;
175 struct housekeeping housekeeping; 187 struct housekeeping housekeeping;
176 struct work_struct set_multicast_hash_work; 188 struct beacon beacon;
177 struct work_struct set_rts_cts_work; 189 struct work_struct set_rts_cts_work;
178 struct work_struct set_rx_filter_work;
179 struct work_struct process_intr; 190 struct work_struct process_intr;
180 struct zd_mc_hash multicast_hash; 191 struct zd_mc_hash multicast_hash;
181 u8 intr_buffer[USB_MAX_EP_INT_BUFFER]; 192 u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
182 u8 regdomain; 193 u8 regdomain;
183 u8 default_regdomain; 194 u8 default_regdomain;
195 u8 channel;
184 int type; 196 int type;
185 int associated; 197 int associated;
198 unsigned long flags;
186 struct sk_buff_head ack_wait_queue; 199 struct sk_buff_head ack_wait_queue;
187 struct ieee80211_channel channels[14]; 200 struct ieee80211_channel channels[14];
188 struct ieee80211_rate rates[12]; 201 struct ieee80211_rate rates[12];
@@ -191,9 +204,6 @@ struct zd_mac {
191 /* Short preamble (used for RTS/CTS) */ 204 /* Short preamble (used for RTS/CTS) */
192 unsigned int short_preamble:1; 205 unsigned int short_preamble:1;
193 206
194 /* flags to indicate update in progress */
195 unsigned int updating_rts_rate:1;
196
197 /* whether to pass frames with CRC errors to stack */ 207 /* whether to pass frames with CRC errors to stack */
198 unsigned int pass_failed_fcs:1; 208 unsigned int pass_failed_fcs:1;
199 209
@@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
304void zd_mac_tx_failed(struct urb *urb); 314void zd_mac_tx_failed(struct urb *urb);
305void zd_mac_tx_to_dev(struct sk_buff *skb, int error); 315void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
306 316
317int zd_op_start(struct ieee80211_hw *hw);
318void zd_op_stop(struct ieee80211_hw *hw);
319int zd_restore_settings(struct zd_mac *mac);
320
307#ifdef DEBUG 321#ifdef DEBUG
308void zd_dump_rx_status(const struct rx_status *status); 322void zd_dump_rx_status(const struct rx_status *status);
309#else 323#else
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 06041cb1c42..81e80489a05 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -377,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb)
377 int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2)); 377 int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
378 if (int_num == CR_INTERRUPT) { 378 if (int_num == CR_INTERRUPT) {
379 struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context)); 379 struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
380 spin_lock(&mac->lock);
380 memcpy(&mac->intr_buffer, urb->transfer_buffer, 381 memcpy(&mac->intr_buffer, urb->transfer_buffer,
381 USB_MAX_EP_INT_BUFFER); 382 USB_MAX_EP_INT_BUFFER);
383 spin_unlock(&mac->lock);
382 schedule_work(&mac->process_intr); 384 schedule_work(&mac->process_intr);
383 } else if (intr->read_regs_enabled) { 385 } else if (intr->read_regs_enabled) {
384 intr->read_regs.length = len = urb->actual_length; 386 intr->read_regs.length = len = urb->actual_length;
@@ -409,8 +411,10 @@ static void int_urb_complete(struct urb *urb)
409 case -ENOENT: 411 case -ENOENT:
410 case -ECONNRESET: 412 case -ECONNRESET:
411 case -EPIPE: 413 case -EPIPE:
412 goto kfree; 414 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
415 return;
413 default: 416 default:
417 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
414 goto resubmit; 418 goto resubmit;
415 } 419 }
416 420
@@ -441,12 +445,11 @@ static void int_urb_complete(struct urb *urb)
441resubmit: 445resubmit:
442 r = usb_submit_urb(urb, GFP_ATOMIC); 446 r = usb_submit_urb(urb, GFP_ATOMIC);
443 if (r) { 447 if (r) {
444 dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb); 448 dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
445 goto kfree; 449 urb, r);
450 /* TODO: add worker to reset intr->urb */
446 } 451 }
447 return; 452 return;
448kfree:
449 kfree(urb->transfer_buffer);
450} 453}
451 454
452static inline int int_urb_interval(struct usb_device *udev) 455static inline int int_urb_interval(struct usb_device *udev)
@@ -477,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb)
477int zd_usb_enable_int(struct zd_usb *usb) 480int zd_usb_enable_int(struct zd_usb *usb)
478{ 481{
479 int r; 482 int r;
480 struct usb_device *udev; 483 struct usb_device *udev = zd_usb_to_usbdev(usb);
481 struct zd_usb_interrupt *intr = &usb->intr; 484 struct zd_usb_interrupt *intr = &usb->intr;
482 void *transfer_buffer = NULL;
483 struct urb *urb; 485 struct urb *urb;
484 486
485 dev_dbg_f(zd_usb_dev(usb), "\n"); 487 dev_dbg_f(zd_usb_dev(usb), "\n");
@@ -500,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb)
500 intr->urb = urb; 502 intr->urb = urb;
501 spin_unlock_irq(&intr->lock); 503 spin_unlock_irq(&intr->lock);
502 504
503 /* TODO: make it a DMA buffer */
504 r = -ENOMEM; 505 r = -ENOMEM;
505 transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL); 506 intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
506 if (!transfer_buffer) { 507 GFP_KERNEL, &intr->buffer_dma);
508 if (!intr->buffer) {
507 dev_dbg_f(zd_usb_dev(usb), 509 dev_dbg_f(zd_usb_dev(usb),
508 "couldn't allocate transfer_buffer\n"); 510 "couldn't allocate transfer_buffer\n");
509 goto error_set_urb_null; 511 goto error_set_urb_null;
510 } 512 }
511 513
512 udev = zd_usb_to_usbdev(usb);
513 usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN), 514 usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
514 transfer_buffer, USB_MAX_EP_INT_BUFFER, 515 intr->buffer, USB_MAX_EP_INT_BUFFER,
515 int_urb_complete, usb, 516 int_urb_complete, usb,
516 intr->interval); 517 intr->interval);
518 urb->transfer_dma = intr->buffer_dma;
519 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
517 520
518 dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb); 521 dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
519 r = usb_submit_urb(urb, GFP_KERNEL); 522 r = usb_submit_urb(urb, GFP_KERNEL);
@@ -525,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb)
525 528
526 return 0; 529 return 0;
527error: 530error:
528 kfree(transfer_buffer); 531 usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
532 intr->buffer, intr->buffer_dma);
529error_set_urb_null: 533error_set_urb_null:
530 spin_lock_irq(&intr->lock); 534 spin_lock_irq(&intr->lock);
531 intr->urb = NULL; 535 intr->urb = NULL;
@@ -539,8 +543,11 @@ out:
539void zd_usb_disable_int(struct zd_usb *usb) 543void zd_usb_disable_int(struct zd_usb *usb)
540{ 544{
541 unsigned long flags; 545 unsigned long flags;
546 struct usb_device *udev = zd_usb_to_usbdev(usb);
542 struct zd_usb_interrupt *intr = &usb->intr; 547 struct zd_usb_interrupt *intr = &usb->intr;
543 struct urb *urb; 548 struct urb *urb;
549 void *buffer;
550 dma_addr_t buffer_dma;
544 551
545 spin_lock_irqsave(&intr->lock, flags); 552 spin_lock_irqsave(&intr->lock, flags);
546 urb = intr->urb; 553 urb = intr->urb;
@@ -549,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb)
549 return; 556 return;
550 } 557 }
551 intr->urb = NULL; 558 intr->urb = NULL;
559 buffer = intr->buffer;
560 buffer_dma = intr->buffer_dma;
561 intr->buffer = NULL;
552 spin_unlock_irqrestore(&intr->lock, flags); 562 spin_unlock_irqrestore(&intr->lock, flags);
553 563
554 usb_kill_urb(urb); 564 usb_kill_urb(urb);
555 dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb); 565 dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
556 usb_free_urb(urb); 566 usb_free_urb(urb);
567
568 if (buffer)
569 usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
570 buffer, buffer_dma);
557} 571}
558 572
559static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, 573static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
@@ -601,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
601 615
602static void rx_urb_complete(struct urb *urb) 616static void rx_urb_complete(struct urb *urb)
603{ 617{
618 int r;
604 struct zd_usb *usb; 619 struct zd_usb *usb;
605 struct zd_usb_rx *rx; 620 struct zd_usb_rx *rx;
606 const u8 *buffer; 621 const u8 *buffer;
@@ -615,6 +630,7 @@ static void rx_urb_complete(struct urb *urb)
615 case -ENOENT: 630 case -ENOENT:
616 case -ECONNRESET: 631 case -ECONNRESET:
617 case -EPIPE: 632 case -EPIPE:
633 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
618 return; 634 return;
619 default: 635 default:
620 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); 636 dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
@@ -626,6 +642,8 @@ static void rx_urb_complete(struct urb *urb)
626 usb = urb->context; 642 usb = urb->context;
627 rx = &usb->rx; 643 rx = &usb->rx;
628 644
645 zd_usb_reset_rx_idle_timer(usb);
646
629 if (length%rx->usb_packet_size > rx->usb_packet_size-4) { 647 if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
630 /* If there is an old first fragment, we don't care. */ 648 /* If there is an old first fragment, we don't care. */
631 dev_dbg_f(urb_dev(urb), "*** first fragment ***\n"); 649 dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
@@ -654,7 +672,9 @@ static void rx_urb_complete(struct urb *urb)
654 } 672 }
655 673
656resubmit: 674resubmit:
657 usb_submit_urb(urb, GFP_ATOMIC); 675 r = usb_submit_urb(urb, GFP_ATOMIC);
676 if (r)
677 dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
658} 678}
659 679
660static struct urb *alloc_rx_urb(struct zd_usb *usb) 680static struct urb *alloc_rx_urb(struct zd_usb *usb)
@@ -690,7 +710,7 @@ static void free_rx_urb(struct urb *urb)
690 usb_free_urb(urb); 710 usb_free_urb(urb);
691} 711}
692 712
693int zd_usb_enable_rx(struct zd_usb *usb) 713static int __zd_usb_enable_rx(struct zd_usb *usb)
694{ 714{
695 int i, r; 715 int i, r;
696 struct zd_usb_rx *rx = &usb->rx; 716 struct zd_usb_rx *rx = &usb->rx;
@@ -742,7 +762,21 @@ error:
742 return r; 762 return r;
743} 763}
744 764
745void zd_usb_disable_rx(struct zd_usb *usb) 765int zd_usb_enable_rx(struct zd_usb *usb)
766{
767 int r;
768 struct zd_usb_rx *rx = &usb->rx;
769
770 mutex_lock(&rx->setup_mutex);
771 r = __zd_usb_enable_rx(usb);
772 mutex_unlock(&rx->setup_mutex);
773
774 zd_usb_reset_rx_idle_timer(usb);
775
776 return r;
777}
778
779static void __zd_usb_disable_rx(struct zd_usb *usb)
746{ 780{
747 int i; 781 int i;
748 unsigned long flags; 782 unsigned long flags;
@@ -769,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb)
769 spin_unlock_irqrestore(&rx->lock, flags); 803 spin_unlock_irqrestore(&rx->lock, flags);
770} 804}
771 805
806void zd_usb_disable_rx(struct zd_usb *usb)
807{
808 struct zd_usb_rx *rx = &usb->rx;
809
810 mutex_lock(&rx->setup_mutex);
811 __zd_usb_disable_rx(usb);
812 mutex_unlock(&rx->setup_mutex);
813
814 cancel_delayed_work_sync(&rx->idle_work);
815}
816
817static void zd_usb_reset_rx(struct zd_usb *usb)
818{
819 bool do_reset;
820 struct zd_usb_rx *rx = &usb->rx;
821 unsigned long flags;
822
823 mutex_lock(&rx->setup_mutex);
824
825 spin_lock_irqsave(&rx->lock, flags);
826 do_reset = rx->urbs != NULL;
827 spin_unlock_irqrestore(&rx->lock, flags);
828
829 if (do_reset) {
830 __zd_usb_disable_rx(usb);
831 __zd_usb_enable_rx(usb);
832 }
833
834 mutex_unlock(&rx->setup_mutex);
835
836 if (do_reset)
837 zd_usb_reset_rx_idle_timer(usb);
838}
839
772/** 840/**
773 * zd_usb_disable_tx - disable transmission 841 * zd_usb_disable_tx - disable transmission
774 * @usb: the zd1211rw-private USB structure 842 * @usb: the zd1211rw-private USB structure
@@ -779,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb)
779{ 847{
780 struct zd_usb_tx *tx = &usb->tx; 848 struct zd_usb_tx *tx = &usb->tx;
781 unsigned long flags; 849 unsigned long flags;
782 struct list_head *pos, *n; 850
851 atomic_set(&tx->enabled, 0);
852
853 /* kill all submitted tx-urbs */
854 usb_kill_anchored_urbs(&tx->submitted);
783 855
784 spin_lock_irqsave(&tx->lock, flags); 856 spin_lock_irqsave(&tx->lock, flags);
785 list_for_each_safe(pos, n, &tx->free_urb_list) { 857 WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
786 list_del(pos); 858 WARN_ON(tx->submitted_urbs != 0);
787 usb_free_urb(list_entry(pos, struct urb, urb_list));
788 }
789 tx->enabled = 0;
790 tx->submitted_urbs = 0; 859 tx->submitted_urbs = 0;
860 spin_unlock_irqrestore(&tx->lock, flags);
861
791 /* The stopped state is ignored, relying on ieee80211_wake_queues() 862 /* The stopped state is ignored, relying on ieee80211_wake_queues()
792 	 * in a potentially following zd_usb_enable_tx(). 863 	 * in a potentially following zd_usb_enable_tx().

793 */ 864 */
794 spin_unlock_irqrestore(&tx->lock, flags);
795} 865}
796 866
797/** 867/**
@@ -807,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb)
807 struct zd_usb_tx *tx = &usb->tx; 877 struct zd_usb_tx *tx = &usb->tx;
808 878
809 spin_lock_irqsave(&tx->lock, flags); 879 spin_lock_irqsave(&tx->lock, flags);
810 tx->enabled = 1; 880 atomic_set(&tx->enabled, 1);
811 tx->submitted_urbs = 0; 881 tx->submitted_urbs = 0;
812 ieee80211_wake_queues(zd_usb_to_hw(usb)); 882 ieee80211_wake_queues(zd_usb_to_hw(usb));
813 tx->stopped = 0; 883 tx->stopped = 0;
814 spin_unlock_irqrestore(&tx->lock, flags); 884 spin_unlock_irqrestore(&tx->lock, flags);
815} 885}
816 886
817/**
818 * alloc_tx_urb - provides an tx URB
819 * @usb: a &struct zd_usb pointer
820 *
821 * Allocates a new URB. If possible takes the urb from the free list in
822 * usb->tx.
823 */
824static struct urb *alloc_tx_urb(struct zd_usb *usb)
825{
826 struct zd_usb_tx *tx = &usb->tx;
827 unsigned long flags;
828 struct list_head *entry;
829 struct urb *urb;
830
831 spin_lock_irqsave(&tx->lock, flags);
832 if (list_empty(&tx->free_urb_list)) {
833 urb = usb_alloc_urb(0, GFP_ATOMIC);
834 goto out;
835 }
836 entry = tx->free_urb_list.next;
837 list_del(entry);
838 urb = list_entry(entry, struct urb, urb_list);
839out:
840 spin_unlock_irqrestore(&tx->lock, flags);
841 return urb;
842}
843
844/**
845 * free_tx_urb - frees a used tx URB
846 * @usb: a &struct zd_usb pointer
847 * @urb: URB to be freed
848 *
849 * Frees the transmission URB, which means to put it on the free URB
850 * list.
851 */
852static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
853{
854 struct zd_usb_tx *tx = &usb->tx;
855 unsigned long flags;
856
857 spin_lock_irqsave(&tx->lock, flags);
858 if (!tx->enabled) {
859 usb_free_urb(urb);
860 goto out;
861 }
862 list_add(&urb->urb_list, &tx->free_urb_list);
863out:
864 spin_unlock_irqrestore(&tx->lock, flags);
865}
866
867static void tx_dec_submitted_urbs(struct zd_usb *usb) 887static void tx_dec_submitted_urbs(struct zd_usb *usb)
868{ 888{
869 struct zd_usb_tx *tx = &usb->tx; 889 struct zd_usb_tx *tx = &usb->tx;
@@ -905,6 +925,16 @@ static void tx_urb_complete(struct urb *urb)
905 struct sk_buff *skb; 925 struct sk_buff *skb;
906 struct ieee80211_tx_info *info; 926 struct ieee80211_tx_info *info;
907 struct zd_usb *usb; 927 struct zd_usb *usb;
928 struct zd_usb_tx *tx;
929
930 skb = (struct sk_buff *)urb->context;
931 info = IEEE80211_SKB_CB(skb);
932 /*
933 * grab 'usb' pointer before handing off the skb (since
934 * it might be freed by zd_mac_tx_to_dev or mac80211)
935 */
936 usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
937 tx = &usb->tx;
908 938
909 switch (urb->status) { 939 switch (urb->status) {
910 case 0: 940 case 0:
@@ -922,20 +952,16 @@ static void tx_urb_complete(struct urb *urb)
922 goto resubmit; 952 goto resubmit;
923 } 953 }
924free_urb: 954free_urb:
925 skb = (struct sk_buff *)urb->context; 955 skb_unlink(skb, &usb->tx.submitted_skbs);
926 /*
927 * grab 'usb' pointer before handing off the skb (since
928 * it might be freed by zd_mac_tx_to_dev or mac80211)
929 */
930 info = IEEE80211_SKB_CB(skb);
931 usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
932 zd_mac_tx_to_dev(skb, urb->status); 956 zd_mac_tx_to_dev(skb, urb->status);
933 free_tx_urb(usb, urb); 957 usb_free_urb(urb);
934 tx_dec_submitted_urbs(usb); 958 tx_dec_submitted_urbs(usb);
935 return; 959 return;
936resubmit: 960resubmit:
961 usb_anchor_urb(urb, &tx->submitted);
937 r = usb_submit_urb(urb, GFP_ATOMIC); 962 r = usb_submit_urb(urb, GFP_ATOMIC);
938 if (r) { 963 if (r) {
964 usb_unanchor_urb(urb);
939 dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r); 965 dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
940 goto free_urb; 966 goto free_urb;
941 } 967 }
@@ -956,10 +982,17 @@ resubmit:
956int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb) 982int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
957{ 983{
958 int r; 984 int r;
985 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
959 struct usb_device *udev = zd_usb_to_usbdev(usb); 986 struct usb_device *udev = zd_usb_to_usbdev(usb);
960 struct urb *urb; 987 struct urb *urb;
988 struct zd_usb_tx *tx = &usb->tx;
989
990 if (!atomic_read(&tx->enabled)) {
991 r = -ENOENT;
992 goto out;
993 }
961 994
962 urb = alloc_tx_urb(usb); 995 urb = usb_alloc_urb(0, GFP_ATOMIC);
963 if (!urb) { 996 if (!urb) {
964 r = -ENOMEM; 997 r = -ENOMEM;
965 goto out; 998 goto out;
@@ -968,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
968 usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), 1001 usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
969 skb->data, skb->len, tx_urb_complete, skb); 1002 skb->data, skb->len, tx_urb_complete, skb);
970 1003
1004 info->rate_driver_data[1] = (void *)jiffies;
1005 skb_queue_tail(&tx->submitted_skbs, skb);
1006 usb_anchor_urb(urb, &tx->submitted);
1007
971 r = usb_submit_urb(urb, GFP_ATOMIC); 1008 r = usb_submit_urb(urb, GFP_ATOMIC);
972 if (r) 1009 if (r) {
1010 dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
1011 usb_unanchor_urb(urb);
1012 skb_unlink(skb, &tx->submitted_skbs);
973 goto error; 1013 goto error;
1014 }
974 tx_inc_submitted_urbs(usb); 1015 tx_inc_submitted_urbs(usb);
975 return 0; 1016 return 0;
976error: 1017error:
977 free_tx_urb(usb, urb); 1018 usb_free_urb(urb);
978out: 1019out:
979 return r; 1020 return r;
980} 1021}
981 1022
1023static bool zd_tx_timeout(struct zd_usb *usb)
1024{
1025 struct zd_usb_tx *tx = &usb->tx;
1026 struct sk_buff_head *q = &tx->submitted_skbs;
1027 struct sk_buff *skb, *skbnext;
1028 struct ieee80211_tx_info *info;
1029 unsigned long flags, trans_start;
1030 bool have_timedout = false;
1031
1032 spin_lock_irqsave(&q->lock, flags);
1033 skb_queue_walk_safe(q, skb, skbnext) {
1034 info = IEEE80211_SKB_CB(skb);
1035 trans_start = (unsigned long)info->rate_driver_data[1];
1036
1037 if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
1038 have_timedout = true;
1039 break;
1040 }
1041 }
1042 spin_unlock_irqrestore(&q->lock, flags);
1043
1044 return have_timedout;
1045}
1046
1047static void zd_tx_watchdog_handler(struct work_struct *work)
1048{
1049 struct zd_usb *usb =
1050 container_of(work, struct zd_usb, tx.watchdog_work.work);
1051 struct zd_usb_tx *tx = &usb->tx;
1052
1053 if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
1054 goto out;
1055 if (!zd_tx_timeout(usb))
1056 goto out;
1057
1058 /* TX halted, try reset */
 1059 	dev_warn(zd_usb_dev(usb), "TX-stall detected, resetting device...");
1060
1061 usb_queue_reset_device(usb->intf);
1062
1063 /* reset will stop this worker, don't rearm */
1064 return;
1065out:
1066 queue_delayed_work(zd_workqueue, &tx->watchdog_work,
1067 ZD_TX_WATCHDOG_INTERVAL);
1068}
1069
1070void zd_tx_watchdog_enable(struct zd_usb *usb)
1071{
1072 struct zd_usb_tx *tx = &usb->tx;
1073
1074 if (!tx->watchdog_enabled) {
1075 dev_dbg_f(zd_usb_dev(usb), "\n");
1076 queue_delayed_work(zd_workqueue, &tx->watchdog_work,
1077 ZD_TX_WATCHDOG_INTERVAL);
1078 tx->watchdog_enabled = 1;
1079 }
1080}
1081
1082void zd_tx_watchdog_disable(struct zd_usb *usb)
1083{
1084 struct zd_usb_tx *tx = &usb->tx;
1085
1086 if (tx->watchdog_enabled) {
1087 dev_dbg_f(zd_usb_dev(usb), "\n");
1088 tx->watchdog_enabled = 0;
1089 cancel_delayed_work_sync(&tx->watchdog_work);
1090 }
1091}
1092
1093static void zd_rx_idle_timer_handler(struct work_struct *work)
1094{
1095 struct zd_usb *usb =
1096 container_of(work, struct zd_usb, rx.idle_work.work);
1097 struct zd_mac *mac = zd_usb_to_mac(usb);
1098
1099 if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
1100 return;
1101
1102 dev_dbg_f(zd_usb_dev(usb), "\n");
1103
1104 /* 30 seconds since last rx, reset rx */
1105 zd_usb_reset_rx(usb);
1106}
1107
1108void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
1109{
1110 struct zd_usb_rx *rx = &usb->rx;
1111
1112 cancel_delayed_work(&rx->idle_work);
1113 queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
1114}
1115
982static inline void init_usb_interrupt(struct zd_usb *usb) 1116static inline void init_usb_interrupt(struct zd_usb *usb)
983{ 1117{
984 struct zd_usb_interrupt *intr = &usb->intr; 1118 struct zd_usb_interrupt *intr = &usb->intr;
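The TX path above stops keeping a free-URB cache and instead anchors every submitted URB, stamps the submit time into info->rate_driver_data[1], and keeps the skb on tx->submitted_skbs so the TX watchdog can detect a stalled endpoint: if any in-flight packet is older than ZD_TX_TIMEOUT, the device is reset via usb_queue_reset_device(). A stand-alone sketch of that timestamp-on-submit, scan-for-stale-entries idea (plain C; the list type and the 5-second timeout are placeholders, the real ZD_TX_TIMEOUT value is not shown in this hunk):

#include <stdbool.h>
#include <stddef.h>
#include <time.h>

#define TX_TIMEOUT_SEC 5		/* placeholder for ZD_TX_TIMEOUT */

/* One in-flight packet: the submit time plays the role of the jiffies
 * value the patch stores in info->rate_driver_data[1]. */
struct inflight {
	time_t submitted;
	struct inflight *next;
};

/* Walk the in-flight list; any entry older than the timeout means the
 * TX path has stalled and the watchdog should reset the device. */
static bool tx_timed_out(const struct inflight *head, time_t now)
{
	const struct inflight *p;

	for (p = head; p != NULL; p = p->next) {
		if (now > p->submitted + TX_TIMEOUT_SEC)
			return true;
	}
	return false;
}

The same hunk also adds the resettable RX idle timer: zd_usb_reset_rx_idle_timer() cancels and re-queues the delayed work on every received URB, so the RX reset only fires after ZD_RX_IDLE_INTERVAL of silence.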
@@ -993,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb)
993{ 1127{
994 struct zd_usb_rx *rx = &usb->rx; 1128 struct zd_usb_rx *rx = &usb->rx;
995 spin_lock_init(&rx->lock); 1129 spin_lock_init(&rx->lock);
1130 mutex_init(&rx->setup_mutex);
996 if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { 1131 if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
997 rx->usb_packet_size = 512; 1132 rx->usb_packet_size = 512;
998 } else { 1133 } else {
999 rx->usb_packet_size = 64; 1134 rx->usb_packet_size = 64;
1000 } 1135 }
1001 ZD_ASSERT(rx->fragment_length == 0); 1136 ZD_ASSERT(rx->fragment_length == 0);
1137 INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
1002} 1138}
1003 1139
1004static inline void init_usb_tx(struct zd_usb *usb) 1140static inline void init_usb_tx(struct zd_usb *usb)
1005{ 1141{
1006 struct zd_usb_tx *tx = &usb->tx; 1142 struct zd_usb_tx *tx = &usb->tx;
1007 spin_lock_init(&tx->lock); 1143 spin_lock_init(&tx->lock);
1008 tx->enabled = 0; 1144 atomic_set(&tx->enabled, 0);
1009 tx->stopped = 0; 1145 tx->stopped = 0;
1010 INIT_LIST_HEAD(&tx->free_urb_list); 1146 skb_queue_head_init(&tx->submitted_skbs);
1147 init_usb_anchor(&tx->submitted);
1011 tx->submitted_urbs = 0; 1148 tx->submitted_urbs = 0;
1149 tx->watchdog_enabled = 0;
1150 INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
1012} 1151}
1013 1152
1014void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw, 1153void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
@@ -1017,6 +1156,7 @@ void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
1017 memset(usb, 0, sizeof(*usb)); 1156 memset(usb, 0, sizeof(*usb));
1018 usb->intf = usb_get_intf(intf); 1157 usb->intf = usb_get_intf(intf);
1019 usb_set_intfdata(usb->intf, hw); 1158 usb_set_intfdata(usb->intf, hw);
1159 init_usb_anchor(&usb->submitted_cmds);
1020 init_usb_interrupt(usb); 1160 init_usb_interrupt(usb);
1021 init_usb_tx(usb); 1161 init_usb_tx(usb);
1022 init_usb_rx(usb); 1162 init_usb_rx(usb);
@@ -1240,6 +1380,7 @@ static void disconnect(struct usb_interface *intf)
1240 ieee80211_unregister_hw(hw); 1380 ieee80211_unregister_hw(hw);
1241 1381
1242 /* Just in case something has gone wrong! */ 1382 /* Just in case something has gone wrong! */
1383 zd_usb_disable_tx(usb);
1243 zd_usb_disable_rx(usb); 1384 zd_usb_disable_rx(usb);
1244 zd_usb_disable_int(usb); 1385 zd_usb_disable_int(usb);
1245 1386
@@ -1255,11 +1396,92 @@ static void disconnect(struct usb_interface *intf)
1255 dev_dbg(&intf->dev, "disconnected\n"); 1396 dev_dbg(&intf->dev, "disconnected\n");
1256} 1397}
1257 1398
1399static void zd_usb_resume(struct zd_usb *usb)
1400{
1401 struct zd_mac *mac = zd_usb_to_mac(usb);
1402 int r;
1403
1404 dev_dbg_f(zd_usb_dev(usb), "\n");
1405
1406 r = zd_op_start(zd_usb_to_hw(usb));
1407 if (r < 0) {
1408 dev_warn(zd_usb_dev(usb), "Device resume failed "
1409 "with error code %d. Retrying...\n", r);
1410 if (usb->was_running)
1411 set_bit(ZD_DEVICE_RUNNING, &mac->flags);
1412 usb_queue_reset_device(usb->intf);
1413 return;
1414 }
1415
1416 if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
1417 r = zd_restore_settings(mac);
1418 if (r < 0) {
1419 dev_dbg(zd_usb_dev(usb),
1420 "failed to restore settings, %d\n", r);
1421 return;
1422 }
1423 }
1424}
1425
1426static void zd_usb_stop(struct zd_usb *usb)
1427{
1428 dev_dbg_f(zd_usb_dev(usb), "\n");
1429
1430 zd_op_stop(zd_usb_to_hw(usb));
1431
1432 zd_usb_disable_tx(usb);
1433 zd_usb_disable_rx(usb);
1434 zd_usb_disable_int(usb);
1435
1436 usb->initialized = 0;
1437}
1438
1439static int pre_reset(struct usb_interface *intf)
1440{
1441 struct ieee80211_hw *hw = usb_get_intfdata(intf);
1442 struct zd_mac *mac;
1443 struct zd_usb *usb;
1444
1445 if (!hw || intf->condition != USB_INTERFACE_BOUND)
1446 return 0;
1447
1448 mac = zd_hw_mac(hw);
1449 usb = &mac->chip.usb;
1450
1451 usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
1452
1453 zd_usb_stop(usb);
1454
1455 mutex_lock(&mac->chip.mutex);
1456 return 0;
1457}
1458
1459static int post_reset(struct usb_interface *intf)
1460{
1461 struct ieee80211_hw *hw = usb_get_intfdata(intf);
1462 struct zd_mac *mac;
1463 struct zd_usb *usb;
1464
1465 if (!hw || intf->condition != USB_INTERFACE_BOUND)
1466 return 0;
1467
1468 mac = zd_hw_mac(hw);
1469 usb = &mac->chip.usb;
1470
1471 mutex_unlock(&mac->chip.mutex);
1472
1473 if (usb->was_running)
1474 zd_usb_resume(usb);
1475 return 0;
1476}
1477
1258static struct usb_driver driver = { 1478static struct usb_driver driver = {
1259 .name = KBUILD_MODNAME, 1479 .name = KBUILD_MODNAME,
1260 .id_table = usb_ids, 1480 .id_table = usb_ids,
1261 .probe = probe, 1481 .probe = probe,
1262 .disconnect = disconnect, 1482 .disconnect = disconnect,
1483 .pre_reset = pre_reset,
1484 .post_reset = post_reset,
1263}; 1485};
1264 1486
1265struct workqueue_struct *zd_workqueue; 1487struct workqueue_struct *zd_workqueue;
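The pre_reset()/post_reset() callbacks registered above close the loop on the TX watchdog: usb_queue_reset_device() only schedules the reset, and the USB core later brackets the actual reset with these callbacks, so the chip mutex taken in pre_reset() keeps register I/O from racing the reset and post_reset() restores operation if the interface was running. The rough sequence, paraphrased rather than quoted from the USB core:

/*
 * Recovery flow (paraphrased, not taken from this patch):
 *
 *   zd_tx_watchdog_handler()
 *     usb_queue_reset_device(intf)      returns immediately
 *       ... USB core, from its own work item ...
 *       pre_reset(intf)                 save was_running, zd_usb_stop(),
 *                                       mutex_lock(&chip.mutex)
 *       <device reset, configuration restored>
 *       post_reset(intf)                mutex_unlock(&chip.mutex),
 *                                       zd_usb_resume() if was_running
 */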
@@ -1393,30 +1615,35 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1393 return -EWOULDBLOCK; 1615 return -EWOULDBLOCK;
1394 } 1616 }
1395 if (!usb_int_enabled(usb)) { 1617 if (!usb_int_enabled(usb)) {
1396 dev_dbg_f(zd_usb_dev(usb), 1618 dev_dbg_f(zd_usb_dev(usb),
1397 "error: usb interrupt not enabled\n"); 1619 "error: usb interrupt not enabled\n");
1398 return -EWOULDBLOCK; 1620 return -EWOULDBLOCK;
1399 } 1621 }
1400 1622
1623 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1624 BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
1625 sizeof(__le16) > sizeof(usb->req_buf));
1626 BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
1627 sizeof(usb->req_buf));
1628
1401 req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16); 1629 req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
1402 req = kmalloc(req_len, GFP_KERNEL); 1630 req = (void *)usb->req_buf;
1403 if (!req) 1631
1404 return -ENOMEM;
1405 req->id = cpu_to_le16(USB_REQ_READ_REGS); 1632 req->id = cpu_to_le16(USB_REQ_READ_REGS);
1406 for (i = 0; i < count; i++) 1633 for (i = 0; i < count; i++)
1407 req->addr[i] = cpu_to_le16((u16)addresses[i]); 1634 req->addr[i] = cpu_to_le16((u16)addresses[i]);
1408 1635
1409 udev = zd_usb_to_usbdev(usb); 1636 udev = zd_usb_to_usbdev(usb);
1410 prepare_read_regs_int(usb); 1637 prepare_read_regs_int(usb);
1411 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), 1638 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
1412 req, req_len, &actual_req_len, 1000 /* ms */); 1639 req, req_len, &actual_req_len, 50 /* ms */);
1413 if (r) { 1640 if (r) {
1414 dev_dbg_f(zd_usb_dev(usb), 1641 dev_dbg_f(zd_usb_dev(usb),
1415 "error in usb_bulk_msg(). Error number %d\n", r); 1642 "error in usb_interrupt_msg(). Error number %d\n", r);
1416 goto error; 1643 goto error;
1417 } 1644 }
1418 if (req_len != actual_req_len) { 1645 if (req_len != actual_req_len) {
1419 dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n" 1646 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
1420 " req_len %d != actual_req_len %d\n", 1647 " req_len %d != actual_req_len %d\n",
1421 req_len, actual_req_len); 1648 req_len, actual_req_len);
1422 r = -EIO; 1649 r = -EIO;
@@ -1424,7 +1651,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1424 } 1651 }
1425 1652
1426 timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion, 1653 timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
1427 msecs_to_jiffies(1000)); 1654 msecs_to_jiffies(50));
1428 if (!timeout) { 1655 if (!timeout) {
1429 disable_read_regs_int(usb); 1656 disable_read_regs_int(usb);
1430 dev_dbg_f(zd_usb_dev(usb), "read timed out\n"); 1657 dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
@@ -1434,17 +1661,106 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1434 1661
1435 r = get_results(usb, values, req, count); 1662 r = get_results(usb, values, req, count);
1436error: 1663error:
1437 kfree(req);
1438 return r; 1664 return r;
1439} 1665}
1440 1666
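The read path above drops its per-call kmalloc() and borrows the fixed usb->req_buf added to struct zd_usb in the header hunk further below; holding the chip mutex (checked by the ZD_ASSERT) is what makes sharing a single buffer safe, and the BUILD_BUG_ON/BUG_ON pair bounds the request size. A rough sizing sketch follows; the struct layouts (a lone __le16 id header, __le16 addresses for reads, __le16 addr/value pairs for writes) and the 15-entry vectored limits are assumptions, not visible in this hunk:

/* Sizing sketch -- struct layouts and limits assumed, see above:
 *
 *   ioread16v : sizeof(struct usb_req_read_regs)  + 15 * sizeof(__le16)
 *             = 2 + 30 = 32 bytes
 *   iowrite16v: sizeof(struct usb_req_write_regs) + 15 * sizeof(struct reg_data)
 *             = 2 + 60 = 62 bytes
 *
 * consistent with the 64-byte req_buf and its "zd_usb_iowrite16v needs
 * 62 bytes" note in zd_usb.h.
 */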
1441int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, 1667static void iowrite16v_urb_complete(struct urb *urb)
1442 unsigned int count) 1668{
1669 struct zd_usb *usb = urb->context;
1670
1671 if (urb->status && !usb->cmd_error)
1672 usb->cmd_error = urb->status;
1673}
1674
1675static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
1676{
1677 int r = 0;
1678 struct urb *urb = usb->urb_async_waiting;
1679
1680 if (!urb)
1681 return 0;
1682
1683 usb->urb_async_waiting = NULL;
1684
1685 if (!last)
1686 urb->transfer_flags |= URB_NO_INTERRUPT;
1687
1688 usb_anchor_urb(urb, &usb->submitted_cmds);
1689 r = usb_submit_urb(urb, GFP_KERNEL);
1690 if (r) {
1691 usb_unanchor_urb(urb);
1692 dev_dbg_f(zd_usb_dev(usb),
1693 "error in usb_submit_urb(). Error number %d\n", r);
1694 goto error;
1695 }
1696
1697 /* fall-through with r == 0 */
1698error:
1699 usb_free_urb(urb);
1700 return r;
1701}
1702
1703void zd_usb_iowrite16v_async_start(struct zd_usb *usb)
1704{
1705 ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds));
1706 ZD_ASSERT(usb->urb_async_waiting == NULL);
1707 ZD_ASSERT(!usb->in_async);
1708
1709 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1710
1711 usb->in_async = 1;
1712 usb->cmd_error = 0;
1713 usb->urb_async_waiting = NULL;
1714}
1715
1716int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout)
1717{
1718 int r;
1719
1720 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1721 ZD_ASSERT(usb->in_async);
1722
1723 /* Submit last iowrite16v URB */
1724 r = zd_submit_waiting_urb(usb, true);
1725 if (r) {
1726 dev_dbg_f(zd_usb_dev(usb),
1727 "error in zd_submit_waiting_usb(). "
1728 "Error number %d\n", r);
1729
1730 usb_kill_anchored_urbs(&usb->submitted_cmds);
1731 goto error;
1732 }
1733
1734 if (timeout)
1735 timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds,
1736 timeout);
1737 if (!timeout) {
1738 usb_kill_anchored_urbs(&usb->submitted_cmds);
1739 if (usb->cmd_error == -ENOENT) {
1740 dev_dbg_f(zd_usb_dev(usb), "timed out");
1741 r = -ETIMEDOUT;
1742 goto error;
1743 }
1744 }
1745
1746 r = usb->cmd_error;
1747error:
1748 usb->in_async = 0;
1749 return r;
1750}
1751
1752int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1753 unsigned int count)
1443{ 1754{
1444 int r; 1755 int r;
1445 struct usb_device *udev; 1756 struct usb_device *udev;
1446 struct usb_req_write_regs *req = NULL; 1757 struct usb_req_write_regs *req = NULL;
1447 int i, req_len, actual_req_len; 1758 int i, req_len;
1759 struct urb *urb;
1760 struct usb_host_endpoint *ep;
1761
1762 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1763 ZD_ASSERT(usb->in_async);
1448 1764
1449 if (count == 0) 1765 if (count == 0)
1450 return 0; 1766 return 0;
@@ -1460,11 +1776,23 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1460 return -EWOULDBLOCK; 1776 return -EWOULDBLOCK;
1461 } 1777 }
1462 1778
1779 udev = zd_usb_to_usbdev(usb);
1780
1781 ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT));
1782 if (!ep)
1783 return -ENOENT;
1784
1785 urb = usb_alloc_urb(0, GFP_KERNEL);
1786 if (!urb)
1787 return -ENOMEM;
1788
1463 req_len = sizeof(struct usb_req_write_regs) + 1789 req_len = sizeof(struct usb_req_write_regs) +
1464 count * sizeof(struct reg_data); 1790 count * sizeof(struct reg_data);
1465 req = kmalloc(req_len, GFP_KERNEL); 1791 req = kmalloc(req_len, GFP_KERNEL);
1466 if (!req) 1792 if (!req) {
1467 return -ENOMEM; 1793 r = -ENOMEM;
1794 goto error;
1795 }
1468 1796
1469 req->id = cpu_to_le16(USB_REQ_WRITE_REGS); 1797 req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
1470 for (i = 0; i < count; i++) { 1798 for (i = 0; i < count; i++) {
@@ -1473,29 +1801,44 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1473 rw->value = cpu_to_le16(ioreqs[i].value); 1801 rw->value = cpu_to_le16(ioreqs[i].value);
1474 } 1802 }
1475 1803
1476 udev = zd_usb_to_usbdev(usb); 1804 usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
1477 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), 1805 req, req_len, iowrite16v_urb_complete, usb,
1478 req, req_len, &actual_req_len, 1000 /* ms */); 1806 ep->desc.bInterval);
1807 urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
1808
1809 /* Submit previous URB */
1810 r = zd_submit_waiting_urb(usb, false);
1479 if (r) { 1811 if (r) {
1480 dev_dbg_f(zd_usb_dev(usb), 1812 dev_dbg_f(zd_usb_dev(usb),
1481 "error in usb_bulk_msg(). Error number %d\n", r); 1813 "error in zd_submit_waiting_usb(). "
1482 goto error; 1814 "Error number %d\n", r);
1483 }
1484 if (req_len != actual_req_len) {
1485 dev_dbg_f(zd_usb_dev(usb),
1486 "error in usb_bulk_msg()"
1487 " req_len %d != actual_req_len %d\n",
1488 req_len, actual_req_len);
1489 r = -EIO;
1490 goto error; 1815 goto error;
1491 } 1816 }
1492 1817
1493 /* FALL-THROUGH with r == 0 */ 1818 /* Delay submit so that URB_NO_INTERRUPT flag can be set for all URBs
1819	 * of the current batch except for the very last.
1820 */
1821 usb->urb_async_waiting = urb;
1822 return 0;
1494error: 1823error:
1495 kfree(req); 1824 usb_free_urb(urb);
1496 return r; 1825 return r;
1497} 1826}
1498 1827
1828int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1829 unsigned int count)
1830{
1831 int r;
1832
1833 zd_usb_iowrite16v_async_start(usb);
1834 r = zd_usb_iowrite16v_async(usb, ioreqs, count);
1835 if (r) {
1836 zd_usb_iowrite16v_async_end(usb, 0);
1837 return r;
1838 }
1839 return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
1840}
1841
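The rewritten zd_usb_iowrite16v() above is itself the single-batch use of the new async API. A caller that wants the batching benefit (URB_NO_INTERRUPT set on every URB except the last of the batch) issues several async writes between start and end while holding the chip mutex, roughly as in this hedged sketch; the caller name and the register groups ioreqs_a/ioreqs_b with their counts are hypothetical:

/* hypothetical caller batching two register groups in one async window;
 * zd_usb_to_chip(usb)->mutex must already be held, as the ZD_ASSERTs require */
static int my_write_two_groups(struct zd_usb *usb,
			       const struct zd_ioreq16 *ioreqs_a, unsigned int na,
			       const struct zd_ioreq16 *ioreqs_b, unsigned int nb)
{
	int r;

	zd_usb_iowrite16v_async_start(usb);

	r = zd_usb_iowrite16v_async(usb, ioreqs_a, na);
	if (!r)
		r = zd_usb_iowrite16v_async(usb, ioreqs_b, nb);
	if (r) {
		zd_usb_iowrite16v_async_end(usb, 0);	/* kill anything already queued */
		return r;
	}

	/* submits the last parked URB and waits up to 50 ms for completion */
	return zd_usb_iowrite16v_async_end(usb, 50 /* ms */);
}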
1499int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) 1842int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1500{ 1843{
1501 int r; 1844 int r;
@@ -1537,14 +1880,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1537 if (r) { 1880 if (r) {
1538 dev_dbg_f(zd_usb_dev(usb), 1881 dev_dbg_f(zd_usb_dev(usb),
1539 "error %d: Couldn't read CR203\n", r); 1882 "error %d: Couldn't read CR203\n", r);
1540 goto out; 1883 return r;
1541 } 1884 }
1542 bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA); 1885 bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
1543 1886
1887 ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
1888 BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
1889 USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
1890 sizeof(usb->req_buf));
1891 BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
1892 sizeof(usb->req_buf));
1893
1544 req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16); 1894 req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
1545 req = kmalloc(req_len, GFP_KERNEL); 1895 req = (void *)usb->req_buf;
1546 if (!req)
1547 return -ENOMEM;
1548 1896
1549 req->id = cpu_to_le16(USB_REQ_WRITE_RF); 1897 req->id = cpu_to_le16(USB_REQ_WRITE_RF);
1550 /* 1: 3683a, but not used in ZYDAS driver */ 1898 /* 1: 3683a, but not used in ZYDAS driver */
@@ -1559,15 +1907,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1559 } 1907 }
1560 1908
1561 udev = zd_usb_to_usbdev(usb); 1909 udev = zd_usb_to_usbdev(usb);
1562 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), 1910 r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
1563 req, req_len, &actual_req_len, 1000 /* ms */); 1911 req, req_len, &actual_req_len, 50 /* ms */);
1564 if (r) { 1912 if (r) {
1565 dev_dbg_f(zd_usb_dev(usb), 1913 dev_dbg_f(zd_usb_dev(usb),
1566 "error in usb_bulk_msg(). Error number %d\n", r); 1914 "error in usb_interrupt_msg(). Error number %d\n", r);
1567 goto out; 1915 goto out;
1568 } 1916 }
1569 if (req_len != actual_req_len) { 1917 if (req_len != actual_req_len) {
1570 dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()" 1918 dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
1571 " req_len %d != actual_req_len %d\n", 1919 " req_len %d != actual_req_len %d\n",
1572 req_len, actual_req_len); 1920 req_len, actual_req_len);
1573 r = -EIO; 1921 r = -EIO;
@@ -1576,6 +1924,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
1576 1924
1577 /* FALL-THROUGH with r == 0 */ 1925 /* FALL-THROUGH with r == 0 */
1578out: 1926out:
1579 kfree(req);
1580 return r; 1927 return r;
1581} 1928}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 1b1655cb7cb..b3df2c8116c 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -32,6 +32,10 @@
32#define ZD_USB_TX_HIGH 5 32#define ZD_USB_TX_HIGH 5
33#define ZD_USB_TX_LOW 2 33#define ZD_USB_TX_LOW 2
34 34
35#define ZD_TX_TIMEOUT (HZ * 5)
36#define ZD_TX_WATCHDOG_INTERVAL round_jiffies_relative(HZ)
37#define ZD_RX_IDLE_INTERVAL round_jiffies_relative(30 * HZ)
38
35enum devicetype { 39enum devicetype {
36 DEVICE_ZD1211 = 0, 40 DEVICE_ZD1211 = 0,
37 DEVICE_ZD1211B = 1, 41 DEVICE_ZD1211B = 1,
@@ -162,6 +166,8 @@ struct zd_usb_interrupt {
162 struct read_regs_int read_regs; 166 struct read_regs_int read_regs;
163 spinlock_t lock; 167 spinlock_t lock;
164 struct urb *urb; 168 struct urb *urb;
169 void *buffer;
170 dma_addr_t buffer_dma;
165 int interval; 171 int interval;
166 u8 read_regs_enabled:1; 172 u8 read_regs_enabled:1;
167}; 173};
@@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
175 181
176struct zd_usb_rx { 182struct zd_usb_rx {
177 spinlock_t lock; 183 spinlock_t lock;
178 u8 fragment[2*USB_MAX_RX_SIZE]; 184 struct mutex setup_mutex;
185 struct delayed_work idle_work;
186 u8 fragment[2 * USB_MAX_RX_SIZE];
179 unsigned int fragment_length; 187 unsigned int fragment_length;
180 unsigned int usb_packet_size; 188 unsigned int usb_packet_size;
181 struct urb **urbs; 189 struct urb **urbs;
@@ -184,19 +192,21 @@ struct zd_usb_rx {
184 192
185/** 193/**
186 * struct zd_usb_tx - structure used for transmitting frames 194 * struct zd_usb_tx - structure used for transmitting frames
195 * @enabled: atomic enabled flag, indicates whether tx is enabled
187 * @lock: lock for transmission 196 * @lock: lock for transmission
188 * @free_urb_list: list of free URBs, contains all the URBs, which can be used 197 * @submitted: anchor for URBs sent to device
189 * @submitted_urbs: atomic integer that counts the URBs having sent to the 198 * @submitted_urbs: atomic integer that counts the URBs having sent to the
190 * device, which haven't been completed 199 * device, which haven't been completed
191 * @enabled: enabled flag, indicates whether tx is enabled
192 * @stopped: indicates whether higher level tx queues are stopped 200 * @stopped: indicates whether higher level tx queues are stopped
193 */ 201 */
194struct zd_usb_tx { 202struct zd_usb_tx {
203 atomic_t enabled;
195 spinlock_t lock; 204 spinlock_t lock;
196 struct list_head free_urb_list; 205 struct delayed_work watchdog_work;
206 struct sk_buff_head submitted_skbs;
207 struct usb_anchor submitted;
197 int submitted_urbs; 208 int submitted_urbs;
198 int enabled; 209 u8 stopped:1, watchdog_enabled:1;
199 int stopped;
200}; 210};
201 211
202/* Contains the usb parts. The structure doesn't require a lock because intf 212/* Contains the usb parts. The structure doesn't require a lock because intf
@@ -207,7 +217,11 @@ struct zd_usb {
207 struct zd_usb_rx rx; 217 struct zd_usb_rx rx;
208 struct zd_usb_tx tx; 218 struct zd_usb_tx tx;
209 struct usb_interface *intf; 219 struct usb_interface *intf;
210 u8 is_zd1211b:1, initialized:1; 220 struct usb_anchor submitted_cmds;
221 struct urb *urb_async_waiting;
222 int cmd_error;
223 u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */
224 u8 is_zd1211b:1, initialized:1, was_running:1, in_async:1;
211}; 225};
212 226
213#define zd_usb_dev(usb) (&usb->intf->dev) 227#define zd_usb_dev(usb) (&usb->intf->dev)
@@ -234,12 +248,17 @@ void zd_usb_clear(struct zd_usb *usb);
234 248
235int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size); 249int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
236 250
251void zd_tx_watchdog_enable(struct zd_usb *usb);
252void zd_tx_watchdog_disable(struct zd_usb *usb);
253
237int zd_usb_enable_int(struct zd_usb *usb); 254int zd_usb_enable_int(struct zd_usb *usb);
238void zd_usb_disable_int(struct zd_usb *usb); 255void zd_usb_disable_int(struct zd_usb *usb);
239 256
240int zd_usb_enable_rx(struct zd_usb *usb); 257int zd_usb_enable_rx(struct zd_usb *usb);
241void zd_usb_disable_rx(struct zd_usb *usb); 258void zd_usb_disable_rx(struct zd_usb *usb);
242 259
260void zd_usb_reset_rx_idle_timer(struct zd_usb *usb);
261
243void zd_usb_enable_tx(struct zd_usb *usb); 262void zd_usb_enable_tx(struct zd_usb *usb);
244void zd_usb_disable_tx(struct zd_usb *usb); 263void zd_usb_disable_tx(struct zd_usb *usb);
245 264
@@ -254,6 +273,10 @@ static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
254 return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1); 273 return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
255} 274}
256 275
276void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
277int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout);
278int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
279 unsigned int count);
257int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, 280int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
258 unsigned int count); 281 unsigned int count);
259 282